From 6b9114b45543526a845bcce0728bc9a970322c6e Mon Sep 17 00:00:00 2001
From: Zbynek Roubalik
Date: Tue, 2 Aug 2022 22:41:13 +0200
Subject: [PATCH] Reference ScaledObject's/ScaledJob's name in the scalers log

Signed-off-by: Zbynek Roubalik
---
 CHANGELOG.md                                  |  1 +
 .../keda/scaledobject_controller_test.go      | 30 +++---
 pkg/scalers/activemq_scaler.go                | 10 +-
 pkg/scalers/artemis_scaler.go                 | 12 +--
 pkg/scalers/aws_cloudwatch_scaler.go          | 72 +++++++-------
 pkg/scalers/aws_cloudwatch_scaler_test.go     |  5 +-
 pkg/scalers/aws_dynamodb_scaler.go            | 40 ++++----
 pkg/scalers/aws_dynamodb_scaler_test.go       |  7 +-
 pkg/scalers/aws_dynamodb_streams_scaler.go    | 18 ++--
 .../aws_dynamodb_streams_scaler_test.go       | 11 ++-
 pkg/scalers/aws_kinesis_stream_scaler.go      | 18 ++--
 pkg/scalers/aws_kinesis_stream_scaler_test.go |  9 +-
 pkg/scalers/aws_sqs_queue_scaler.go           | 29 +++---
 pkg/scalers/aws_sqs_queue_scaler_test.go      |  9 +-
 pkg/scalers/azure_app_insights_scaler.go      | 20 ++--
 pkg/scalers/azure_app_insights_scaler_test.go |  4 +-
 pkg/scalers/azure_blob_scaler.go              | 19 ++--
 pkg/scalers/azure_blob_scaler_test.go         |  6 +-
 pkg/scalers/azure_data_explorer_scaler.go     | 22 ++---
 .../azure_data_explorer_scaler_test.go        | 12 ++-
 pkg/scalers/azure_eventhub_scaler.go          | 52 +++++-----
 pkg/scalers/azure_log_analytics_scaler.go     | 26 ++---
 pkg/scalers/azure_monitor_scaler.go           | 20 ++--
 pkg/scalers/azure_monitor_scaler_test.go      |  8 +-
 pkg/scalers/azure_pipelines_scaler.go         | 10 +-
 pkg/scalers/azure_queue_scaler.go             | 20 ++--
 pkg/scalers/azure_queue_scaler_test.go        |  8 +-
 pkg/scalers/azure_servicebus_scaler.go        | 20 ++--
 pkg/scalers/azure_servicebus_scaler_test.go   |  8 +-
 pkg/scalers/cassandra_scaler.go               | 24 ++---
 pkg/scalers/cassandra_scaler_test.go          |  7 +-
 pkg/scalers/cpu_memory_scaler.go              | 12 +--
 pkg/scalers/cpu_memory_scaler_test.go         |  3 +-
 pkg/scalers/cron_scaler.go                    |  8 +-
 pkg/scalers/cron_scaler_test.go               |  3 +-
 pkg/scalers/datadog_scaler.go                 | 18 ++--
 pkg/scalers/datadog_scaler_test.go            |  5 +-
 pkg/scalers/elasticsearch_scaler.go           | 22 +++--
 pkg/scalers/external_scaler.go                | 26 ++---
 pkg/scalers/external_scaler_test.go           |  2 +-
 pkg/scalers/gcp_pubsub_scaler.go              | 24 ++---
 pkg/scalers/gcp_pubsub_scaler_test.go         | 12 ++-
 pkg/scalers/gcp_stackdriver_scaler.go         | 38 ++++----
 pkg/scalers/gcp_stackdriver_scaler_test.go    |  8 +-
 pkg/scalers/gcp_storage_scaler.go             | 30 +++---
 pkg/scalers/gcp_storage_scaler_test.go        |  8 +-
 pkg/scalers/graphite_scaler.go                | 10 +-
 pkg/scalers/huawei_cloudeye_scaler.go         | 94 +++++++++---------
 pkg/scalers/huawei_cloudeye_scaler_test.go    |  8 +-
 pkg/scalers/ibmmq_scaler.go                   |  3 +
 pkg/scalers/influxdb_scaler.go                | 10 +-
 pkg/scalers/influxdb_scaler_test.go           |  3 +-
 pkg/scalers/kafka_scaler.go                   | 22 +++--
 pkg/scalers/kafka_scaler_test.go              | 12 ++-
 pkg/scalers/kubernetes_workload_scaler.go     |  5 +-
 .../kubernetes_workload_scaler_test.go        | 26 ++---
 pkg/scalers/liiklus_scaler.go                 |  3 +
 pkg/scalers/liiklus_scaler_test.go            |  3 +-
 pkg/scalers/metrics_api_scaler.go             |  8 +-
 pkg/scalers/mongo_scaler.go                   | 14 +--
 pkg/scalers/mongo_scaler_test.go              |  3 +-
 pkg/scalers/mssql_scaler.go                   | 20 ++--
 pkg/scalers/mysql_scaler.go                   | 22 +++--
 pkg/scalers/newrelic_scaler.go                | 19 ++--
 pkg/scalers/newrelic_scaler_test.go           |  6 +-
 pkg/scalers/openstack_metrics_scaler.go       | 96 ++++++++++---------
 pkg/scalers/openstack_metrics_scaler_test.go  |  7 +-
 pkg/scalers/openstack_swift_scaler.go         | 28 +++---
 pkg/scalers/openstack_swift_scaler_test.go    |  3 +-
 pkg/scalers/postgresql_scaler.go              | 20 ++--
 pkg/scalers/postgresql_scaler_test.go         |  4 +-
 pkg/scalers/predictkube_scaler.go             | 28 +++---
 pkg/scalers/prometheus_scaler.go              | 24 +++-
 pkg/scalers/rabbitmq_scaler.go                |  9 +-
 pkg/scalers/redis_scaler.go                   | 34 +++----
 pkg/scalers/redis_scaler_test.go              |  2 +
 pkg/scalers/redis_streams_scaler.go           | 30 +++---
 pkg/scalers/redis_streams_scaler_test.go      |  3 +-
 pkg/scalers/scaler.go                         | 19 +++-
 pkg/scalers/selenium_grid_scaler.go           | 22 +++--
 pkg/scalers/selenium_grid_scaler_test.go      |  4 +-
 pkg/scalers/solace_scaler.go                  | 17 ++--
 pkg/scalers/stan_scaler.go                    | 22 ++---
 pkg/scaling/scale_handler.go                  | 17 ++--
 84 files changed, 791 insertions(+), 665 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ee242c652e3..b1a1abf7a3a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -47,6 +47,7 @@ To learn more about our roadmap, we recommend reading [this document](ROADMAP.md
 ### Improvements

 - **General:** `external` extension reduces connection establishment with long links ([#3193](https://github.com/kedacore/keda/issues/3193))
+- **General:** Reference ScaledObject's/ScaledJob's name in the scalers log ([#3419](https://github.com/kedacore/keda/issues/3419))
 - **General:** Use `mili` scale for the returned metrics ([#3135](https://github.com/kedacore/keda/issue/3135))
 - **General:** Use more readable timestamps in KEDA Operator logs ([#3066](https://github.com/kedacore/keda/issue/3066))
 - **AWS SQS Queue Scaler:** Support for scaling to include in-flight messages. ([#3133](https://github.com/kedacore/keda/issues/3133))
diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go
index aa5aa8ed95c..e88cd228526 100644
--- a/controllers/keda/scaledobject_controller_test.go
+++ b/controllers/keda/scaledobject_controller_test.go
@@ -91,11 +91,11 @@ var _ = Describe("ScaledObjectController", func() {
 			for i, tm := range triggerMeta {
 				config := &scalers.ScalerConfig{
-					Name:            fmt.Sprintf("test.%d", i),
-					Namespace:       "test",
-					TriggerMetadata: tm,
-					ResolvedEnv:     nil,
-					AuthParams:      nil,
+					ScalableObjectName:      fmt.Sprintf("test.%d", i),
+					ScalableObjectNamespace: "test",
+					TriggerMetadata:         tm,
+					ResolvedEnv:             nil,
+					AuthParams:              nil,
 				}

 				s, err := scalers.NewPrometheusScaler(config)
@@ -141,11 +141,11 @@ var _ = Describe("ScaledObjectController", func() {
 			expectedExternalMetricNames := make([]string, 0)

 			config := &scalers.ScalerConfig{
-				Name:            "test",
-				Namespace:       "test",
-				TriggerMetadata: triggerMeta[0],
-				ResolvedEnv:     nil,
-				AuthParams:      nil,
+				ScalableObjectName:      "test",
+				ScalableObjectNamespace: "test",
+				TriggerMetadata:         triggerMeta[0],
+				ResolvedEnv:             nil,
+				AuthParams:              nil,
 			}

 			s, err := scalers.NewPrometheusScaler(config)
@@ -191,11 +191,11 @@ var _ = Describe("ScaledObjectController", func() {
 			testScalers := make([]cache.ScalerBuilder, 0)
 			for i := 0; i < 4; i++ {
 				config := &scalers.ScalerConfig{
-					Name:            fmt.Sprintf("test.%d", i),
-					Namespace:       "test",
-					TriggerMetadata: triggerMeta[0],
-					ResolvedEnv:     nil,
-					AuthParams:      nil,
+					ScalableObjectName:      fmt.Sprintf("test.%d", i),
+					ScalableObjectNamespace: "test",
+					TriggerMetadata:         triggerMeta[0],
+					ResolvedEnv:             nil,
+					AuthParams:              nil,
 				}

 				s, err := scalers.NewPrometheusScaler(config)
diff --git a/pkg/scalers/activemq_scaler.go b/pkg/scalers/activemq_scaler.go
index 84ba3f6f5a6..c9532d59fad 100644
--- a/pkg/scalers/activemq_scaler.go
+++ b/pkg/scalers/activemq_scaler.go
@@ -12,10 +12,10 @@ import (
 	"strings"
 	"text/template"

+	"github.com/go-logr/logr"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -24,6 +24,7 @@ type activeMQScaler struct {
 	metricType v2beta2.MetricTargetType
 	metadata   *activeMQMetadata
 	httpClient *http.Client
+	logger     logr.Logger
 }

 type activeMQMetadata struct {
@@ -52,8 +53,6 @@ const (
 	defaultActiveMQRestAPITemplate = "http://{{.ManagementEndpoint}}/api/jolokia/read/org.apache.activemq:type=Broker,brokerName={{.BrokerName}},destinationType=Queue,destinationName={{.DestinationName}}/QueueSize"
 )

-var activeMQLog = logf.Log.WithName("activeMQ_scaler")
-
 // NewActiveMQScaler creates a new activeMQ Scaler
 func NewActiveMQScaler(config *ScalerConfig) (Scaler, error) {
 	metricType, err := GetMetricTargetType(config)
@@ -71,6 +70,7 @@ func NewActiveMQScaler(config *ScalerConfig) (Scaler, error) {
 		metricType: metricType,
 		metadata:   meta,
 		httpClient: httpClient,
+		logger:     InitializeLogger(config, "active_mq_scaler"),
 	}, nil
 }
@@ -170,7 +170,7 @@ func parseActiveMQMetadata(config *ScalerConfig) (*activeMQMetadata, error) {
 func (s *activeMQScaler) IsActive(ctx context.Context) (bool, error) {
 	queueSize, err := s.getQueueMessageCount(ctx)
 	if err != nil {
-		activeMQLog.Error(err, "Unable to access activeMQ management endpoint", "managementEndpoint", s.metadata.managementEndpoint)
+		s.logger.Error(err, "Unable to access activeMQ management endpoint", "managementEndpoint", s.metadata.managementEndpoint)
 		return false, err
 	}
@@ -260,7 +260,7 @@ func (s *activeMQScaler) getQueueMessageCount(ctx context.Context) (int64, error)
 		return -1, fmt.Errorf("ActiveMQ management endpoint response error code : %d %d", resp.StatusCode, monitoringInfo.Status)
 	}

-	activeMQLog.V(1).Info(fmt.Sprintf("ActiveMQ scaler: Providing metrics based on current queue size %d queue size limit %d", queueMessageCount, s.metadata.targetQueueSize))
+	s.logger.V(1).Info(fmt.Sprintf("ActiveMQ scaler: Providing metrics based on current queue size %d queue size limit %d", queueMessageCount, s.metadata.targetQueueSize))

 	return queueMessageCount, nil
 }
diff --git a/pkg/scalers/artemis_scaler.go b/pkg/scalers/artemis_scaler.go
index ca01ed075d6..441aeb38837 100644
--- a/pkg/scalers/artemis_scaler.go
+++ b/pkg/scalers/artemis_scaler.go
@@ -10,10 +10,10 @@ import (
 	"strconv"
 	"strings"

+	"github.com/go-logr/logr"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -22,6 +22,7 @@ type artemisScaler struct {
 	metricType v2beta2.MetricTargetType
 	metadata   *artemisMetadata
 	httpClient *http.Client
+	logger     logr.Logger
 }

 //revive:disable:var-naming breaking change on restApiTemplate, wouldn't bring any benefit to users
@@ -55,8 +56,6 @@ const (
 	defaultCorsHeader = "http://%s"
 )

-var artemisLog = logf.Log.WithName("artemis_queue_scaler")
-
 // NewArtemisQueueScaler creates a new artemis queue Scaler
 func NewArtemisQueueScaler(config *ScalerConfig) (Scaler, error) {
 	// do we need to guarantee this timeout for a specific
@@ -78,6 +77,7 @@ func NewArtemisQueueScaler(config *ScalerConfig) (Scaler, error) {
 		metricType: metricType,
 		metadata:   artemisMetadata,
 		httpClient: httpClient,
+		logger:     InitializeLogger(config, "artemis_queue_scaler"),
 	}, nil
 }
@@ -181,7 +181,7 @@ func parseArtemisMetadata(config *ScalerConfig) (*artemisMetadata, error) {
 func (s *artemisScaler) IsActive(ctx context.Context) (bool, error) {
 	messages, err := s.getQueueMessageCount(ctx)
 	if err != nil {
-		artemisLog.Error(err, "Unable to access the artemis management endpoint", "managementEndpoint", s.metadata.managementEndpoint)
+		s.logger.Error(err, "Unable to access the artemis management endpoint", "managementEndpoint", s.metadata.managementEndpoint)
 		return false, err
 	}
@@ -262,7 +262,7 @@ func (s *artemisScaler) getQueueMessageCount(ctx context.Context) (int64, error)
 		return -1, fmt.Errorf("artemis management endpoint response error code : %d %d", resp.StatusCode, monitoringInfo.Status)
 	}

-	artemisLog.V(1).Info(fmt.Sprintf("Artemis scaler: Providing metrics based on current queue length %d queue length limit %d", messageCount, s.metadata.queueLength))
+	s.logger.V(1).Info(fmt.Sprintf("Artemis scaler: Providing metrics based on current queue length %d queue length limit %d", messageCount, s.metadata.queueLength))

 	return messageCount, nil
 }
@@ -283,7 +283,7 @@ func (s *artemisScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
 	messages, err := s.getQueueMessageCount(ctx)

 	if err != nil {
-		artemisLog.Error(err, "Unable to access the artemis management endpoint", "managementEndpoint", s.metadata.managementEndpoint)
+		s.logger.Error(err, "Unable to access the artemis management endpoint", "managementEndpoint", s.metadata.managementEndpoint)
 		return []external_metrics.ExternalMetricValue{}, err
 	}
diff --git a/pkg/scalers/aws_cloudwatch_scaler.go b/pkg/scalers/aws_cloudwatch_scaler.go
index cd00829123f..1f2b15b06f1 100644
--- a/pkg/scalers/aws_cloudwatch_scaler.go
+++ b/pkg/scalers/aws_cloudwatch_scaler.go
@@ -13,10 +13,10 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/cloudwatch"
 	"github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface"
+	"github.com/go-logr/logr"
 	"k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -32,6 +32,7 @@ type awsCloudwatchScaler struct {
 	metricType v2beta2.MetricTargetType
 	metadata   *awsCloudwatchMetadata
 	cwClient   cloudwatchiface.CloudWatchAPI
+	logger     logr.Logger
 }

 type awsCloudwatchMetadata struct {
@@ -58,8 +59,6 @@ type awsCloudwatchMetadata struct {
 	scalerIndex int
 }

-var cloudwatchLog = logf.Log.WithName("aws_cloudwatch_scaler")
-
 // NewAwsCloudwatchScaler creates a new awsCloudwatchScaler
 func NewAwsCloudwatchScaler(config *ScalerConfig) (Scaler, error) {
 	metricType, err := GetMetricTargetType(config)
@@ -76,6 +75,7 @@ func NewAwsCloudwatchScaler(config *ScalerConfig) (Scaler, error) {
 		metricType: metricType,
 		metadata:   meta,
 		cwClient:   createCloudwatchClient(meta),
+		logger:     InitializeLogger(config, "aws_cloudwatch_scaler"),
 	}, nil
 }
@@ -290,11 +290,11 @@ func computeQueryWindow(current time.Time, metricPeriodSec, metricEndTimeOffsetS
 	return
 }

-func (c *awsCloudwatchScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
-	metricValue, err := c.GetCloudwatchMetrics()
+func (s *awsCloudwatchScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
+	metricValue, err := s.GetCloudwatchMetrics()

 	if err != nil {
-		cloudwatchLog.Error(err, "Error getting metric value")
+		s.logger.Error(err, "Error getting metric value")
 		return []external_metrics.ExternalMetricValue{}, err
 	}
@@ -303,70 +303,70 @@ func (c *awsCloudwatchScaler) GetMetrics(ctx context.Context, metricName string,
 	return append([]external_metrics.ExternalMetricValue{}, metric), nil
 }

-func (c *awsCloudwatchScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec {
+func (s *awsCloudwatchScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec {
 	var metricNameSuffix string
-	if c.metadata.expression != "" {
-		metricNameSuffix = c.metadata.metricsName
+	if s.metadata.expression != "" {
+		metricNameSuffix = s.metadata.metricsName
 	} else {
-		metricNameSuffix = c.metadata.dimensionName[0]
+		metricNameSuffix = s.metadata.dimensionName[0]
 	}

 	externalMetric := &v2beta2.ExternalMetricSource{
 		Metric: v2beta2.MetricIdentifier{
-			Name: GenerateMetricNameWithIndex(c.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("aws-cloudwatch-%s", metricNameSuffix))),
+			Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("aws-cloudwatch-%s", metricNameSuffix))),
 		},
-		Target: GetMetricTargetMili(c.metricType, c.metadata.targetMetricValue),
+		Target: GetMetricTargetMili(s.metricType, s.metadata.targetMetricValue),
 	}
 	metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType}
 	return []v2beta2.MetricSpec{metricSpec}
 }

-func (c *awsCloudwatchScaler) IsActive(ctx context.Context) (bool, error) {
-	val, err := c.GetCloudwatchMetrics()
+func (s *awsCloudwatchScaler) IsActive(ctx context.Context) (bool, error) {
+	val, err := s.GetCloudwatchMetrics()

 	if err != nil {
 		return false, err
 	}

-	return val > c.metadata.activationTargetMetricValue, nil
+	return val > s.metadata.activationTargetMetricValue, nil
 }

-func (c *awsCloudwatchScaler) Close(context.Context) error {
+func (s *awsCloudwatchScaler) Close(context.Context) error {
 	return nil
 }

-func (c *awsCloudwatchScaler) GetCloudwatchMetrics() (float64, error) {
+func (s *awsCloudwatchScaler) GetCloudwatchMetrics() (float64, error) {
 	var input cloudwatch.GetMetricDataInput

-	startTime, endTime := computeQueryWindow(time.Now(), c.metadata.metricStatPeriod, c.metadata.metricEndTimeOffset, c.metadata.metricCollectionTime)
+	startTime, endTime := computeQueryWindow(time.Now(), s.metadata.metricStatPeriod, s.metadata.metricEndTimeOffset, s.metadata.metricCollectionTime)

-	if c.metadata.expression != "" {
+	if s.metadata.expression != "" {
 		input = cloudwatch.GetMetricDataInput{
 			StartTime: aws.Time(startTime),
 			EndTime:   aws.Time(endTime),
 			ScanBy:    aws.String(cloudwatch.ScanByTimestampDescending),
 			MetricDataQueries: []*cloudwatch.MetricDataQuery{
 				{
-					Expression: aws.String(c.metadata.expression),
+					Expression: aws.String(s.metadata.expression),
 					Id:         aws.String("q1"),
-					Period:     aws.Int64(c.metadata.metricStatPeriod),
-					Label:      aws.String(c.metadata.metricsName),
+					Period:     aws.Int64(s.metadata.metricStatPeriod),
+					Label:      aws.String(s.metadata.metricsName),
 				},
 			},
 		}
 	} else {
 		dimensions := []*cloudwatch.Dimension{}
-		for i := range c.metadata.dimensionName {
+		for i := range s.metadata.dimensionName {
 			dimensions = append(dimensions, &cloudwatch.Dimension{
-				Name:  &c.metadata.dimensionName[i],
-				Value: &c.metadata.dimensionValue[i],
+				Name:  &s.metadata.dimensionName[i],
+				Value: &s.metadata.dimensionValue[i],
 			})
 		}

 		var metricUnit *string
-		if c.metadata.metricUnit != "" {
-			metricUnit = aws.String(c.metadata.metricUnit)
+		if s.metadata.metricUnit != "" {
+			metricUnit = aws.String(s.metadata.metricUnit)
 		}

 		input = cloudwatch.GetMetricDataInput{
@@ -378,12 +378,12 @@ func (c *awsCloudwatchScaler) GetCloudwatchMetrics() (float64, error) {
 					Id: aws.String("c1"),
 					MetricStat: &cloudwatch.MetricStat{
 						Metric: &cloudwatch.Metric{
-							Namespace:  aws.String(c.metadata.namespace),
+							Namespace:  aws.String(s.metadata.namespace),
 							Dimensions: dimensions,
-							MetricName: aws.String(c.metadata.metricsName),
+							MetricName: aws.String(s.metadata.metricsName),
 						},
-						Period: aws.Int64(c.metadata.metricStatPeriod),
-						Stat:   aws.String(c.metadata.metricStat),
+						Period: aws.Int64(s.metadata.metricStatPeriod),
+						Stat:   aws.String(s.metadata.metricStat),
 						Unit:   metricUnit,
 					},
 					ReturnData: aws.Bool(true),
@@ -392,20 +392,20 @@ func (c *awsCloudwatchScaler) GetCloudwatchMetrics() (float64, error) {
 		}
 	}

-	output, err := c.cwClient.GetMetricData(&input)
+	output, err := s.cwClient.GetMetricData(&input)

 	if err != nil {
-		cloudwatchLog.Error(err, "Failed to get output")
+		s.logger.Error(err, "Failed to get output")
 		return -1, err
 	}

-	cloudwatchLog.V(1).Info("Received Metric Data", "data", output)
+	s.logger.V(1).Info("Received Metric Data", "data", output)

 	var metricValue float64
 	if len(output.MetricDataResults) > 0 && len(output.MetricDataResults[0].Values) > 0 {
 		metricValue = *output.MetricDataResults[0].Values[0]
 	} else {
-		cloudwatchLog.Info("empty metric data received, returning minMetricValue")
-		metricValue = c.metadata.minMetricValue
+		s.logger.Info("empty metric data received, returning minMetricValue")
+		metricValue = s.metadata.minMetricValue
 	}

 	return metricValue, nil
diff --git a/pkg/scalers/aws_cloudwatch_scaler_test.go b/pkg/scalers/aws_cloudwatch_scaler_test.go
index 33e9509f5d2..f15e06c41a4 100644
--- a/pkg/scalers/aws_cloudwatch_scaler_test.go
+++ b/pkg/scalers/aws_cloudwatch_scaler_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/cloudwatch"
 	"github.com/aws/aws-sdk-go/service/cloudwatch/cloudwatchiface"
+	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/labels"
 )
@@ -487,7 +488,7 @@ func TestAWSCloudwatchGetMetricSpecForScaling(t *testing.T) {
 		if err != nil {
 			t.Fatal("Could not parse metadata:", err)
 		}
-		mockAWSCloudwatchScaler := awsCloudwatchScaler{"", meta, &mockCloudwatch{}}
+		mockAWSCloudwatchScaler := awsCloudwatchScaler{"", meta, &mockCloudwatch{}, logr.Logger{}}
 		metricSpec := mockAWSCloudwatchScaler.GetMetricSpecForScaling(ctx)
 		metricName := metricSpec[0].External.Metric.Name
@@ -500,7 +501,7 @@ func TestAWSCloudwatchScalerGetMetrics(t *testing.T) {
 	var selector labels.Selector
 	for _, meta := range awsCloudwatchGetMetricTestData {
-		mockAWSCloudwatchScaler := awsCloudwatchScaler{"", &meta, &mockCloudwatch{}}
+		mockAWSCloudwatchScaler := awsCloudwatchScaler{"", &meta, &mockCloudwatch{}, logr.Logger{}}
 		value, err := mockAWSCloudwatchScaler.GetMetrics(context.Background(), meta.metricsName, selector)
 		switch meta.metricsName {
 		case testAWSCloudwatchErrorMetric:
diff --git a/pkg/scalers/aws_dynamodb_scaler.go b/pkg/scalers/aws_dynamodb_scaler.go
index 91ceb13bafe..352f67ef96c 100644
--- a/pkg/scalers/aws_dynamodb_scaler.go
+++ b/pkg/scalers/aws_dynamodb_scaler.go
@@ -13,11 +13,11 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/dynamodb"
 	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
+	"github.com/go-logr/logr"
 	"go.mongodb.org/mongo-driver/bson"
 	"k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -26,6 +26,7 @@ type awsDynamoDBScaler struct {
 	metricType v2beta2.MetricTargetType
 	metadata   *awsDynamoDBMetadata
 	dbClient   dynamodbiface.DynamoDBAPI
+	logger     logr.Logger
 }

 type awsDynamoDBMetadata struct {
@@ -41,8 +42,6 @@ type awsDynamoDBMetadata struct {
 	metricName string
 }

-var dynamoDBLog = logf.Log.WithName("aws_dynamodb_scaler")
-
 func NewAwsDynamoDBScaler(config *ScalerConfig) (Scaler, error) {
 	metricType, err := GetMetricTargetType(config)
 	if err != nil {
@@ -58,6 +57,7 @@ func NewAwsDynamoDBScaler(config *ScalerConfig) (Scaler, error) {
 		metricType: metricType,
 		metadata:   meta,
 		dbClient:   createDynamoDBClient(meta),
+		logger:     InitializeLogger(config, "aws_dynamodb_scaler"),
 	}, nil
 }
@@ -171,10 +171,10 @@ func createDynamoDBClient(meta *awsDynamoDBMetadata) *dynamodb.DynamoDB {
 	return dbClient
 }

-func (c *awsDynamoDBScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
-	metricValue, err := c.GetQueryMetrics()
+func (s *awsDynamoDBScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
+	metricValue, err := s.GetQueryMetrics()
 	if err != nil {
-		dynamoDBLog.Error(err, "Error getting metric value")
+		s.logger.Error(err, "Error getting metric value")
 		return []external_metrics.ExternalMetricValue{}, err
 	}
@@ -183,12 +183,12 @@ func (c *awsDynamoDBScaler) GetMetrics(ctx context.Context, metricName string, m
 	return append([]external_metrics.ExternalMetricValue{}, metric), nil
 }

-func (c *awsDynamoDBScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec {
+func (s *awsDynamoDBScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec {
 	externalMetric := &v2beta2.ExternalMetricSource{
 		Metric: v2beta2.MetricIdentifier{
-			Name: c.metadata.metricName,
+			Name: s.metadata.metricName,
 		},
-		Target: GetMetricTarget(c.metricType, c.metadata.targetValue),
+		Target: GetMetricTarget(s.metricType, s.metadata.targetValue),
 	}

 	metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType}
@@ -197,30 +197,30 @@ func (c *awsDynamoDBScaler) GetMetricSpecForScaling(context.Context) []v2beta2.M
 	}
 }

-func (c *awsDynamoDBScaler) IsActive(ctx context.Context) (bool, error) {
-	messages, err := c.GetQueryMetrics()
+func (s *awsDynamoDBScaler) IsActive(ctx context.Context) (bool, error) {
+	messages, err := s.GetQueryMetrics()
 	if err != nil {
 		return false, fmt.Errorf("error inspecting aws-dynamodb: %s", err)
 	}

-	return messages > float64(c.metadata.activationTargetValue), nil
+	return messages > float64(s.metadata.activationTargetValue), nil
 }

-func (c *awsDynamoDBScaler) Close(context.Context) error {
+func (s *awsDynamoDBScaler) Close(context.Context) error {
 	return nil
 }

-func (c *awsDynamoDBScaler) GetQueryMetrics() (float64, error) {
+func (s *awsDynamoDBScaler) GetQueryMetrics() (float64, error) {
 	dimensions := dynamodb.QueryInput{
-		TableName:                 aws.String(c.metadata.tableName),
-		KeyConditionExpression:    aws.String(c.metadata.keyConditionExpression),
-		ExpressionAttributeNames:  c.metadata.expressionAttributeNames,
-		ExpressionAttributeValues: c.metadata.expressionAttributeValues,
+		TableName:                 aws.String(s.metadata.tableName),
+		KeyConditionExpression:    aws.String(s.metadata.keyConditionExpression),
+		ExpressionAttributeNames:  s.metadata.expressionAttributeNames,
+		ExpressionAttributeValues: s.metadata.expressionAttributeValues,
 	}

-	res, err := c.dbClient.Query(&dimensions)
+	res, err := s.dbClient.Query(&dimensions)
 	if err != nil {
-		dynamoDBLog.Error(err, "Failed to get output")
+		s.logger.Error(err, "Failed to get output")
 		return 0, err
 	}
diff --git a/pkg/scalers/aws_dynamodb_scaler_test.go b/pkg/scalers/aws_dynamodb_scaler_test.go
index 79fdda512f3..6099bfdacf7 100644
--- a/pkg/scalers/aws_dynamodb_scaler_test.go
+++ b/pkg/scalers/aws_dynamodb_scaler_test.go
@@ -8,6 +8,7 @@ import (

 	"github.com/aws/aws-sdk-go/service/dynamodb"
 	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
+	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/labels"
 )
@@ -275,7 +276,7 @@ func TestDynamoGetMetrics(t *testing.T) {
 	for _, meta := range awsDynamoDBGetMetricTestData {
 		t.Run(meta.tableName, func(t *testing.T) {
-			scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}}
+			scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}, logr.Logger{}}
 			value, err := scaler.GetMetrics(context.Background(), "aws-dynamodb", selector)

 			switch meta.tableName {
@@ -293,7 +294,7 @@ func TestDynamoGetQueryMetrics(t *testing.T) {
 	for _, meta := range awsDynamoDBGetMetricTestData {
 		t.Run(meta.tableName, func(t *testing.T) {
-			scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}}
+			scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}, logr.Logger{}}
 			value, err := scaler.GetQueryMetrics()

 			switch meta.tableName {
@@ -311,7 +312,7 @@ func TestDynamoIsActive(t *testing.T) {
 	for _, meta := range awsDynamoDBGetMetricTestData {
 		t.Run(meta.tableName, func(t *testing.T) {
-			scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}}
+			scaler := awsDynamoDBScaler{"", &meta, &mockDynamoDB{}, logr.Logger{}}
 			value, err := scaler.IsActive(context.Background())

 			switch meta.tableName {
diff --git a/pkg/scalers/aws_dynamodb_streams_scaler.go b/pkg/scalers/aws_dynamodb_streams_scaler.go
index 3ed37f1c4df..2ab79caf628 100644
--- a/pkg/scalers/aws_dynamodb_streams_scaler.go
+++ b/pkg/scalers/aws_dynamodb_streams_scaler.go
@@ -13,12 +13,12 @@ import (
 	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
 	"github.com/aws/aws-sdk-go/service/dynamodbstreams"
 	"github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface"
+	"github.com/go-logr/logr"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -33,6 +33,7 @@ type awsDynamoDBStreamsScaler struct {
 	metadata       *awsDynamoDBStreamsMetadata
 	streamArn      *string
 	dbStreamClient dynamodbstreamsiface.DynamoDBStreamsAPI
+	logger         logr.Logger
 }

 type awsDynamoDBStreamsMetadata struct {
@@ -44,8 +45,6 @@ type awsDynamoDBStreamsMetadata struct {
 	scalerIndex int
 }

-var dynamodbStreamLog = logf.Log.WithName("aws_dynamodb_streams_scaler")
-
 // NewAwsDynamoDBStreamsScaler creates a new awsDynamoDBStreamsScaler
 func NewAwsDynamoDBStreamsScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) {
 	metricType, err := GetMetricTargetType(config)
@@ -53,7 +52,9 @@ func NewAwsDynamoDBStreamsScaler(ctx context.Context, config *ScalerConfig) (Sca
 		return nil, fmt.Errorf("error getting scaler metric type: %s", err)
 	}

-	meta, err := parseAwsDynamoDBStreamsMetadata(config)
+	logger := InitializeLogger(config, "aws_dynamodb_streams_scaler")
+
+	meta, err := parseAwsDynamoDBStreamsMetadata(config, logger)
 	if err != nil {
 		return nil, fmt.Errorf("error parsing dynamodb stream metadata: %s", err)
 	}
@@ -70,10 +71,11 @@ func NewAwsDynamoDBStreamsScaler(ctx context.Context, config *ScalerConfig) (Sca
 		metadata:       meta,
 		streamArn:      streamArn,
 		dbStreamClient: dbStreamClient,
+		logger:         logger,
 	}, nil
 }

-func parseAwsDynamoDBStreamsMetadata(config *ScalerConfig) (*awsDynamoDBStreamsMetadata, error) {
+func parseAwsDynamoDBStreamsMetadata(config *ScalerConfig, logger logr.Logger) (*awsDynamoDBStreamsMetadata, error) {
 	meta := awsDynamoDBStreamsMetadata{}
 	meta.targetShardCount = defaultTargetDBStreamsShardCount
@@ -93,7 +95,7 @@ func parseAwsDynamoDBStreamsMetadata(config *ScalerConfig) (*awsDynamoDBStreamsM
 		shardCount, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			meta.targetShardCount = defaultTargetDBStreamsShardCount
-			dynamodbStreamLog.Error(err, "error parsing dyanmodb stream metadata shardCount, using default %n", defaultTargetDBStreamsShardCount)
+			logger.Error(err, "error parsing dynamodb stream metadata shardCount, using default %n", defaultTargetDBStreamsShardCount)
 		} else {
 			meta.targetShardCount = shardCount
 		}
@@ -102,7 +104,7 @@ func parseAwsDynamoDBStreamsMetadata(config *ScalerConfig) (*awsDynamoDBStreamsM
 		shardCount, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			meta.activationTargetShardCount = defaultActivationTargetDBStreamsShardCount
-			dynamodbStreamLog.Error(err, "error parsing dyanmodb stream metadata activationTargetShardCount, using default %n", defaultActivationTargetDBStreamsShardCount)
+			logger.Error(err, "error parsing dynamodb stream metadata activationTargetShardCount, using default %n", defaultActivationTargetDBStreamsShardCount)
 		} else {
 			meta.activationTargetShardCount = shardCount
 		}
@@ -193,7 +195,7 @@ func (s *awsDynamoDBStreamsScaler) GetMetrics(ctx context.Context, metricName st
 	shardCount, err := s.GetDynamoDBStreamShardCount(ctx)

 	if err != nil {
-		dynamodbStreamLog.Error(err, "error getting shard count")
+		s.logger.Error(err, "error getting shard count")
 		return []external_metrics.ExternalMetricValue{}, err
 	}
diff --git a/pkg/scalers/aws_dynamodb_streams_scaler_test.go b/pkg/scalers/aws_dynamodb_streams_scaler_test.go
index b06c25425e9..251afd3332a 100644
--- a/pkg/scalers/aws_dynamodb_streams_scaler_test.go
+++ b/pkg/scalers/aws_dynamodb_streams_scaler_test.go
@@ -13,6 +13,7 @@ import (
 	"github.com/aws/aws-sdk-go/service/dynamodb/dynamodbiface"
 	"github.com/aws/aws-sdk-go/service/dynamodbstreams"
 	"github.com/aws/aws-sdk-go/service/dynamodbstreams/dynamodbstreamsiface"
+	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
@@ -352,7 +353,7 @@ var awsDynamoDBStreamsGetMetricTestData = []*awsDynamoDBStreamsMetadata{

 func TestParseAwsDynamoDBStreamsMetadata(t *testing.T) {
 	for _, testData := range testAwsDynamoDBStreamMetadata {
-		result, err := parseAwsDynamoDBStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAwsDynamoDBStreamAuthentication, AuthParams: testData.authParams, ScalerIndex: testData.scalerIndex})
+		result, err := parseAwsDynamoDBStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAwsDynamoDBStreamAuthentication, AuthParams: testData.authParams, ScalerIndex: testData.scalerIndex}, logr.Logger{})
 		if err != nil && !testData.isError {
 			t.Errorf("Expected success because %s got error, %s", testData.comment, err)
 		}
@@ -369,7 +370,7 @@ func TestParseAwsDynamoDBStreamsMetadata(t *testing.T) {
 func TestAwsDynamoDBStreamsGetMetricSpecForScaling(t *testing.T) {
 	for _, testData := range awsDynamoDBStreamMetricIdentifiers {
 		ctx := context.Background()
-		meta, err := parseAwsDynamoDBStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAwsDynamoDBStreamAuthentication, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex})
+		meta, err := parseAwsDynamoDBStreamsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAwsDynamoDBStreamAuthentication, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Logger{})
 		if err != nil {
 			t.Fatal("Could not parse metadata:", err)
 		}
@@ -377,7 +378,7 @@ func TestAwsDynamoDBStreamsGetMetricSpecForScaling(t *testing.T) {
 		if err != nil {
 			t.Fatal("Could not get dynamodb stream arn:", err)
 		}
-		mockAwsDynamoDBStreamsScaler := awsDynamoDBStreamsScaler{"", meta, streamArn, &mockAwsDynamoDBStreams{}}
+		mockAwsDynamoDBStreamsScaler := awsDynamoDBStreamsScaler{"", meta, streamArn, &mockAwsDynamoDBStreams{}, logr.Logger{}}
 		metricSpec := mockAwsDynamoDBStreamsScaler.GetMetricSpecForScaling(ctx)
 		metricName := metricSpec[0].External.Metric.Name
 		if metricName != testData.name {
@@ -395,7 +396,7 @@ func TestAwsDynamoDBStreamsScalerGetMetrics(t *testing.T) {
 		ctx := context.Background()
 		streamArn, err = getDynamoDBStreamsArn(ctx, &mockAwsDynamoDB{}, &meta.tableName)
 		if err == nil {
-			scaler := awsDynamoDBStreamsScaler{"", meta, streamArn, &mockAwsDynamoDBStreams{}}
+			scaler := awsDynamoDBStreamsScaler{"", meta, streamArn, &mockAwsDynamoDBStreams{}, logr.Logger{}}
 			value, err = scaler.GetMetrics(context.Background(), "MetricName", selector)
 		}
 		switch meta.tableName {
@@ -419,7 +420,7 @@ func TestAwsDynamoDBStreamsScalerIsActive(t *testing.T) {
 		ctx := context.Background()
 		streamArn, err = getDynamoDBStreamsArn(ctx, &mockAwsDynamoDB{}, &meta.tableName)
 		if err == nil {
-			scaler := awsDynamoDBStreamsScaler{"", meta, streamArn, &mockAwsDynamoDBStreams{}}
+			scaler := awsDynamoDBStreamsScaler{"", meta, streamArn, &mockAwsDynamoDBStreams{}, logr.Logger{}}
 			value, err = scaler.IsActive(context.Background())
 		}
 		switch meta.tableName {
diff --git a/pkg/scalers/aws_kinesis_stream_scaler.go b/pkg/scalers/aws_kinesis_stream_scaler.go
index 6ec53c8cf13..18d88e72319 100644
--- a/pkg/scalers/aws_kinesis_stream_scaler.go
+++ b/pkg/scalers/aws_kinesis_stream_scaler.go
@@ -11,10 +11,10 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/kinesis"
 	"github.com/aws/aws-sdk-go/service/kinesis/kinesisiface"
+	"github.com/go-logr/logr"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -28,6 +28,7 @@ type awsKinesisStreamScaler struct {
 	metricType    v2beta2.MetricTargetType
 	metadata      *awsKinesisStreamMetadata
 	kinesisClient kinesisiface.KinesisAPI
+	logger        logr.Logger
 }

 type awsKinesisStreamMetadata struct {
@@ -39,8 +40,6 @@ type awsKinesisStreamMetadata struct {
 	scalerIndex int
 }

-var kinesisStreamLog = logf.Log.WithName("aws_kinesis_stream_scaler")
-
 // NewAwsKinesisStreamScaler creates a new awsKinesisStreamScaler
 func NewAwsKinesisStreamScaler(config *ScalerConfig) (Scaler, error) {
 	metricType, err := GetMetricTargetType(config)
@@ -48,7 +47,9 @@ func NewAwsKinesisStreamScaler(config *ScalerConfig) (Scaler, error) {
 		return nil, fmt.Errorf("error getting scaler metric type: %s", err)
 	}

-	meta, err := parseAwsKinesisStreamMetadata(config)
+	logger := InitializeLogger(config, "aws_kinesis_stream_scaler")
+
+	meta, err := parseAwsKinesisStreamMetadata(config, logger)
 	if err != nil {
 		return nil, fmt.Errorf("error parsing Kinesis stream metadata: %s", err)
 	}
@@ -57,10 +58,11 @@ func NewAwsKinesisStreamScaler(config *ScalerConfig) (Scaler, error) {
 		metricType:    metricType,
 		metadata:      meta,
 		kinesisClient: createKinesisClient(meta),
+		logger:        logger,
 	}, nil
 }

-func parseAwsKinesisStreamMetadata(config *ScalerConfig) (*awsKinesisStreamMetadata, error) {
+func parseAwsKinesisStreamMetadata(config *ScalerConfig, logger logr.Logger) (*awsKinesisStreamMetadata, error) {
 	meta := awsKinesisStreamMetadata{}
 	meta.targetShardCount = targetShardCountDefault
@@ -68,7 +70,7 @@ func parseAwsKinesisStreamMetadata(config *ScalerConfig) (*awsKinesisStreamMetad
 		shardCount, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			meta.targetShardCount = targetShardCountDefault
-			kinesisStreamLog.Error(err, "Error parsing Kinesis stream metadata shardCount, using default %n", targetShardCountDefault)
+			logger.Error(err, "Error parsing Kinesis stream metadata shardCount, using default %n", targetShardCountDefault)
 		} else {
 			meta.targetShardCount = shardCount
 		}
@@ -78,7 +80,7 @@ func parseAwsKinesisStreamMetadata(config *ScalerConfig) (*awsKinesisStreamMetad
 		activationShardCount, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			meta.activationTargetShardCount = activationTargetShardCountDefault
-			kinesisStreamLog.Error(err, "Error parsing Kinesis stream metadata activationShardCount, using default %n", activationTargetShardCountDefault)
+			logger.Error(err, "Error parsing Kinesis stream metadata activationShardCount, using default %n", activationTargetShardCountDefault)
 		} else {
 			meta.activationTargetShardCount = activationShardCount
 		}
@@ -164,7 +166,7 @@ func (s *awsKinesisStreamScaler) GetMetrics(ctx context.Context, metricName stri
 	shardCount, err := s.GetAwsKinesisOpenShardCount()

 	if err != nil {
-		kinesisStreamLog.Error(err, "Error getting shard count")
+		s.logger.Error(err, "Error getting shard count")
 		return []external_metrics.ExternalMetricValue{}, err
 	}
diff --git a/pkg/scalers/aws_kinesis_stream_scaler_test.go b/pkg/scalers/aws_kinesis_stream_scaler_test.go
index ff6a0a40acf..d27d393a335 100644
--- a/pkg/scalers/aws_kinesis_stream_scaler_test.go
+++ b/pkg/scalers/aws_kinesis_stream_scaler_test.go
@@ -9,6 +9,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/kinesis"
 	"github.com/aws/aws-sdk-go/service/kinesis/kinesisiface"
+	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/labels"
 )
@@ -291,7 +292,7 @@ var awsKinesisGetMetricTestData = []*awsKinesisStreamMetadata{

 func TestKinesisParseMetadata(t *testing.T) {
 	for _, testData := range testAWSKinesisMetadata {
-		result, err := parseAwsKinesisStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAWSKinesisAuthentication, AuthParams: testData.authParams, ScalerIndex: testData.scalerIndex})
+		result, err := parseAwsKinesisStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAWSKinesisAuthentication, AuthParams: testData.authParams, ScalerIndex: testData.scalerIndex}, logr.Logger{})
 		if err != nil && !testData.isError {
 			t.Errorf("Expected success because %s got error, %s", testData.comment, err)
 		}
@@ -308,11 +309,11 @@ func TestKinesisParseMetadata(t *testing.T) {
 func TestAWSKinesisGetMetricSpecForScaling(t *testing.T) {
 	for _, testData := range awsKinesisMetricIdentifiers {
 		ctx := context.Background()
-		meta, err := parseAwsKinesisStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAWSKinesisAuthentication, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex})
+		meta, err := parseAwsKinesisStreamMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAWSKinesisAuthentication, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Logger{})
 		if err != nil {
 			t.Fatal("Could not parse metadata:", err)
 		}
-		mockAWSKinesisStreamScaler := awsKinesisStreamScaler{"", meta, &mockKinesis{}}
+		mockAWSKinesisStreamScaler := awsKinesisStreamScaler{"", meta, &mockKinesis{}, logr.Logger{}}
 		metricSpec := mockAWSKinesisStreamScaler.GetMetricSpecForScaling(ctx)
 		metricName := metricSpec[0].External.Metric.Name
@@ -325,7 +326,7 @@ func TestAWSKinesisGetMetricSpecForScaling(t *testing.T) {
 func TestAWSKinesisStreamScalerGetMetrics(t *testing.T) {
 	var selector labels.Selector
 	for _, meta := range awsKinesisGetMetricTestData {
-		scaler := awsKinesisStreamScaler{"", meta, &mockKinesis{}}
+		scaler := awsKinesisStreamScaler{"", meta, &mockKinesis{}, logr.Logger{}}
 		value, err := scaler.GetMetrics(context.Background(), "MetricName", selector)
 		switch meta.streamName {
 		case testAWSKinesisErrorStream:
diff --git a/pkg/scalers/aws_sqs_queue_scaler.go b/pkg/scalers/aws_sqs_queue_scaler.go
index 854b83db440..1c3aa287809 100644
--- a/pkg/scalers/aws_sqs_queue_scaler.go
+++ b/pkg/scalers/aws_sqs_queue_scaler.go
@@ -13,10 +13,10 @@ import (
 	"github.com/aws/aws-sdk-go/aws/session"
 	"github.com/aws/aws-sdk-go/service/sqs"
 	"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
+	"github.com/go-logr/logr"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -27,18 +27,16 @@ const (
 	defaultScaleOnInFlight = true
 )

-var (
-	awsSqsQueueMetricNames = []string{
-		"ApproximateNumberOfMessages",
-		"ApproximateNumberOfMessagesNotVisible",
-	}
-	sqsQueueLog = logf.Log.WithName("aws_sqs_queue_scaler")
-)
+var awsSqsQueueMetricNames = []string{
+	"ApproximateNumberOfMessages",
+	"ApproximateNumberOfMessagesNotVisible",
+}

 type awsSqsQueueScaler struct {
 	metricType v2beta2.MetricTargetType
 	metadata   *awsSqsQueueMetadata
 	sqsClient  sqsiface.SQSAPI
+	logger     logr.Logger
 }

 type awsSqsQueueMetadata struct {
@@ -59,7 +57,9 @@ func NewAwsSqsQueueScaler(config *ScalerConfig) (Scaler, error) {
 		return nil, fmt.Errorf("error getting scaler metric type: %s", err)
 	}

-	meta, err := parseAwsSqsQueueMetadata(config)
+	logger := InitializeLogger(config, "aws_sqs_queue_scaler")
+
+	meta, err := parseAwsSqsQueueMetadata(config, logger)
 	if err != nil {
 		return nil, fmt.Errorf("error parsing SQS queue metadata: %s", err)
 	}
@@ -68,10 +68,11 @@ func NewAwsSqsQueueScaler(config *ScalerConfig) (Scaler, error) {
 		metricType: metricType,
 		metadata:   meta,
 		sqsClient:  createSqsClient(meta),
+		logger:     logger,
 	}, nil
 }

-func parseAwsSqsQueueMetadata(config *ScalerConfig) (*awsSqsQueueMetadata, error) {
+func parseAwsSqsQueueMetadata(config *ScalerConfig, logger logr.Logger) (*awsSqsQueueMetadata, error) {
 	meta := awsSqsQueueMetadata{}
 	meta.targetQueueLength = defaultTargetQueueLength
 	meta.scaleOnInFlight = defaultScaleOnInFlight
@@ -80,7 +81,7 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig) (*awsSqsQueueMetadata, error
 		queueLength, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			meta.targetQueueLength = targetQueueLengthDefault
-			sqsQueueLog.Error(err, "Error parsing SQS queue metadata queueLength, using default %n", targetQueueLengthDefault)
+			logger.Error(err, "Error parsing SQS queue metadata queueLength, using default %n", targetQueueLengthDefault)
 		} else {
 			meta.targetQueueLength = queueLength
 		}
@@ -90,7 +91,7 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig) (*awsSqsQueueMetadata, error
 		activationQueueLength, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
 			meta.activationTargetQueueLength = activationTargetQueueLengthDefault
-			sqsQueueLog.Error(err, "Error parsing SQS queue metadata activationQueueLength, using default %n", activationTargetQueueLengthDefault)
+			logger.Error(err, "Error parsing SQS queue metadata activationQueueLength, using default %n", activationTargetQueueLengthDefault)
 		} else {
 			meta.activationTargetQueueLength = activationQueueLength
 		}
@@ -100,7 +101,7 @@ func parseAwsSqsQueueMetadata(config *ScalerConfig) (*awsSqsQueueMetadata, error
 		scaleOnInFlight, err := strconv.ParseBool(val)
 		if err != nil {
 			meta.scaleOnInFlight = defaultScaleOnInFlight
-			sqsQueueLog.Error(err, "Error parsing SQS queue metadata scaleOnInFlight, using default %n", defaultScaleOnInFlight)
+			logger.Error(err, "Error parsing SQS queue metadata scaleOnInFlight, using default %n", defaultScaleOnInFlight)
 		} else {
 			meta.scaleOnInFlight = scaleOnInFlight
 		}
@@ -206,7 +207,7 @@ func (s *awsSqsQueueScaler) GetMetrics(ctx context.Context, metricName string, m
 	queuelen, err := s.getAwsSqsQueueLength()

 	if err != nil {
-		sqsQueueLog.Error(err, "Error getting queue length")
+		s.logger.Error(err, "Error getting queue length")
 		return []external_metrics.ExternalMetricValue{}, err
 	}
diff --git a/pkg/scalers/aws_sqs_queue_scaler_test.go b/pkg/scalers/aws_sqs_queue_scaler_test.go
index 2d8593a2ae2..87db3af2a02 100644
--- a/pkg/scalers/aws_sqs_queue_scaler_test.go
+++ b/pkg/scalers/aws_sqs_queue_scaler_test.go
@@ -8,6 +8,7 @@ import (
 	"github.com/aws/aws-sdk-go/aws"
 	"github.com/aws/aws-sdk-go/service/sqs"
 	"github.com/aws/aws-sdk-go/service/sqs/sqsiface"
+	"github.com/go-logr/logr"
 	"github.com/stretchr/testify/assert"
 	"k8s.io/apimachinery/pkg/labels"
 )
@@ -254,7 +255,7 @@ var awsSQSGetMetricTestData = []*awsSqsQueueMetadata{

 func TestSQSParseMetadata(t *testing.T) {
 	for _, testData := range testAWSSQSMetadata {
-		_, err := parseAwsSqsQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAWSSQSAuthentication, AuthParams: testData.authParams})
+		_, err := parseAwsSqsQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testAWSSQSAuthentication, AuthParams: testData.authParams}, logr.Logger{})
 		if err != nil && !testData.isError {
 			t.Errorf("Expected success because %s got error, %s", testData.comment, err)
 		}
@@ -267,11 +268,11 @@ func TestSQSParseMetadata(t *testing.T) {
 func TestAWSSQSGetMetricSpecForScaling(t *testing.T) {
 	for _, testData := range awsSQSMetricIdentifiers {
 		ctx := context.Background()
-		meta, err := parseAwsSqsQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAWSSQSAuthentication, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex})
+		meta, err := parseAwsSqsQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testAWSSQSAuthentication, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Logger{})
 		if err != nil {
 			t.Fatal("Could not parse metadata:", err)
 		}
-		mockAWSSQSScaler := awsSqsQueueScaler{"", meta, &mockSqs{}}
+		mockAWSSQSScaler := awsSqsQueueScaler{"", meta, &mockSqs{}, logr.Logger{}}
 		metricSpec := mockAWSSQSScaler.GetMetricSpecForScaling(ctx)
 		metricName := metricSpec[0].External.Metric.Name
@@ -284,7 +285,7 @@ func TestAWSSQSGetMetricSpecForScaling(t *testing.T) {
 func TestAWSSQSScalerGetMetrics(t *testing.T) {
 	var selector labels.Selector
 	for _, meta := range awsSQSGetMetricTestData {
-		scaler := awsSqsQueueScaler{"", meta, &mockSqs{}}
+		scaler := awsSqsQueueScaler{"", meta, &mockSqs{}, logr.Logger{}}
 		value, err := scaler.GetMetrics(context.Background(), "MetricName", selector)
 		switch meta.queueURL {
 		case testAWSSQSErrorQueueURL:
diff --git a/pkg/scalers/azure_app_insights_scaler.go b/pkg/scalers/azure_app_insights_scaler.go
index 122b95e4348..e03090772bf 100644
--- a/pkg/scalers/azure_app_insights_scaler.go
+++ b/pkg/scalers/azure_app_insights_scaler.go
@@ -6,10 +6,10 @@ import (
 	"strconv"
 	"strings"

+	"github.com/go-logr/logr"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
 	"github.com/kedacore/keda/v2/pkg/scalers/azure"
@@ -34,12 +34,11 @@ type azureAppInsightsMetadata struct {
 	scalerIndex int
 }

-var azureAppInsightsLog = logf.Log.WithName("azure_app_insights_scaler")
-
 type azureAppInsightsScaler struct {
 	metricType  v2beta2.MetricTargetType
 	metadata    *azureAppInsightsMetadata
 	podIdentity kedav1alpha1.AuthPodIdentity
+	logger      logr.Logger
 }

 // NewAzureAppInsightsScaler creates a new AzureAppInsightsScaler
@@ -49,7 +48,9 @@ func NewAzureAppInsightsScaler(config *ScalerConfig) (Scaler, error) {
 		return nil, fmt.Errorf("error getting scaler metric type: %s", err)
 	}

-	meta, err := parseAzureAppInsightsMetadata(config)
+	logger := InitializeLogger(config, "azure_app_insights_scaler")
+
+	meta, err := parseAzureAppInsightsMetadata(config, logger)
 	if err != nil {
 		return nil, fmt.Errorf("error parsing azure app insights metadata: %s", err)
 	}
@@ -58,10 +59,11 @@ func NewAzureAppInsightsScaler(config *ScalerConfig) (Scaler, error) {
 		metricType:  metricType,
 		metadata:    meta,
 		podIdentity: config.PodIdentity,
+		logger:      logger,
 	}, nil
 }

-func parseAzureAppInsightsMetadata(config *ScalerConfig) (*azureAppInsightsMetadata, error) {
+func parseAzureAppInsightsMetadata(config *ScalerConfig, logger logr.Logger) (*azureAppInsightsMetadata, error) {
 	meta := azureAppInsightsMetadata{
 		azureAppInsightsInfo: azure.AppInsightsInfo{},
 	}
@@ -72,7 +74,7 @@ func parseAzureAppInsightsMetadata(config *ScalerConfig) (*azureAppInsightsMetad
 	}
 	meta.targetValue, err = strconv.ParseFloat(val, 64)
 	if err != nil {
-		azureAppInsightsLog.Error(err, "Error parsing azure app insights metadata", azureAppInsightsTargetValueName, azureAppInsightsTargetValueName)
+		logger.Error(err, "Error parsing azure app insights metadata", azureAppInsightsTargetValueName, azureAppInsightsTargetValueName)
 		return nil, fmt.Errorf("error parsing azure app insights metadata %s: %s", azureAppInsightsTargetValueName, err.Error())
 	}
@@ -81,7 +83,7 @@ func parseAzureAppInsightsMetadata(config *ScalerConfig) (*azureAppInsightsMetad
 	if err == nil {
 		meta.activationTargetValue, err = strconv.ParseFloat(val, 64)
 		if err != nil {
-			azureAppInsightsLog.Error(err, "Error parsing azure app insights metadata", azureAppInsightsActivationTargetValueName, azureAppInsightsActivationTargetValueName)
+			logger.Error(err, "Error parsing azure app insights metadata", azureAppInsightsActivationTargetValueName, azureAppInsightsActivationTargetValueName)
 			return nil, fmt.Errorf("error parsing azure app insights metadata %s: %s", azureAppInsightsActivationTargetValueName, err.Error())
 		}
 	}
@@ -166,7 +168,7 @@ func parseAzureAppInsightsMetadata(config *ScalerConfig) (*azureAppInsightsMetad
 func (s *azureAppInsightsScaler) IsActive(ctx context.Context) (bool, error) {
 	val, err := azure.GetAzureAppInsightsMetricValue(ctx, s.metadata.azureAppInsightsInfo, s.podIdentity)
 	if err != nil {
-		azureAppInsightsLog.Error(err, "error getting azure app insights metric")
+		s.logger.Error(err, "error getting azure app insights metric")
 		return false, err
 	}
@@ -192,7 +194,7 @@ func (s *azureAppInsightsScaler) GetMetricSpecForScaling(context.Context) []v2be
 func (s *azureAppInsightsScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
 	val, err := azure.GetAzureAppInsightsMetricValue(ctx, s.metadata.azureAppInsightsInfo, s.podIdentity)
 	if err != nil {
-		azureAppInsightsLog.Error(err, "error getting azure app insights metric")
+		s.logger.Error(err, "error getting azure app insights metric")
 		return []external_metrics.ExternalMetricValue{}, err
 	}
diff --git a/pkg/scalers/azure_app_insights_scaler_test.go b/pkg/scalers/azure_app_insights_scaler_test.go
index d84ca710075..48169f7f075 100644
--- a/pkg/scalers/azure_app_insights_scaler_test.go
+++ b/pkg/scalers/azure_app_insights_scaler_test.go
@@ -6,6 +6,8 @@ import (
 	"strings"
 	"testing"

+	"github.com/go-logr/logr"
+
 	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
 )
@@ -242,7 +244,7 @@ func TestAzureAppInsightsGetMetricSpecForScaling(t *testing.T) {
 		ctx := context.Background()
 		if !testData.isError {
 			testData.config.ScalerIndex = scalerIndex
-			meta, err := parseAzureAppInsightsMetadata(&testData.config)
+			meta, err := parseAzureAppInsightsMetadata(&testData.config, logr.Logger{})
 			if err != nil {
 				t.Fatal("Could not parse metadata:", err)
 			}
diff --git a/pkg/scalers/azure_blob_scaler.go b/pkg/scalers/azure_blob_scaler.go
index 48d96994d8e..2942caddaf3 100644
--- a/pkg/scalers/azure_blob_scaler.go
+++ b/pkg/scalers/azure_blob_scaler.go
@@ -22,11 +22,11 @@ import (
 	"net/http"
 	"strconv"

+	"github.com/go-logr/logr"
 	"github.com/gobwas/glob"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
 	"github.com/kedacore/keda/v2/pkg/scalers/azure"
@@ -46,10 +46,9 @@ type azureBlobScaler struct {
 	metadata    *azure.BlobMetadata
 	podIdentity kedav1alpha1.AuthPodIdentity
 	httpClient  *http.Client
+	logger      logr.Logger
 }

-var azureBlobLog = logf.Log.WithName("azure_blob_scaler")
-
 // NewAzureBlobScaler creates a new azureBlobScaler
 func NewAzureBlobScaler(config *ScalerConfig) (Scaler, error) {
 	metricType, err := GetMetricTargetType(config)
@@ -57,7 +56,9 @@ func NewAzureBlobScaler(config *ScalerConfig) (Scaler, error) {
 		return nil, fmt.Errorf("error getting scaler metric type: %s", err)
 	}

-	meta, podIdentity, err := parseAzureBlobMetadata(config)
+	logger := InitializeLogger(config, "azure_blob_scaler")
+
+	meta, podIdentity, err := parseAzureBlobMetadata(config, logger)
 	if err != nil {
 		return nil, fmt.Errorf("error parsing azure blob metadata: %s", err)
 	}
@@ -70,7 +71,7 @@ func NewAzureBlobScaler(config *ScalerConfig) (Scaler, error) {
 	}, nil
 }

-func parseAzureBlobMetadata(config *ScalerConfig) (*azure.BlobMetadata, kedav1alpha1.AuthPodIdentity, error) {
+func parseAzureBlobMetadata(config *ScalerConfig, logger logr.Logger) (*azure.BlobMetadata, kedav1alpha1.AuthPodIdentity, error) {
 	meta := azure.BlobMetadata{}
 	meta.TargetBlobCount = defaultTargetBlobCount
 	meta.BlobDelimiter = defaultBlobDelimiter
@@ -79,7 +80,7 @@ func parseAzureBlobMetadata(config *ScalerConfig) (*azure.BlobMetadata, kedav1al
 	if val, ok := config.TriggerMetadata[blobCountMetricName]; ok {
 		blobCount, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
-			azureBlobLog.Error(err, "Error parsing azure blob metadata", "blobCountMetricName", blobCountMetricName)
+			logger.Error(err, "Error parsing azure blob metadata", "blobCountMetricName", blobCountMetricName)
 			return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("error parsing azure blob metadata %s: %s", blobCountMetricName, err.Error())
 		}
@@ -90,7 +91,7 @@ func parseAzureBlobMetadata(config *ScalerConfig) (*azure.BlobMetadata, kedav1al
 	if val, ok := config.TriggerMetadata[activationBlobCountMetricName]; ok {
 		activationBlobCount, err := strconv.ParseInt(val, 10, 64)
 		if err != nil {
-			azureBlobLog.Error(err, "Error parsing azure blob metadata", activationBlobCountMetricName, activationBlobCountMetricName)
+			logger.Error(err, "Error parsing azure blob metadata", activationBlobCountMetricName, activationBlobCountMetricName)
 			return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("error parsing azure blob metadata %s: %s", activationBlobCountMetricName, err.Error())
 		}
@@ -189,7 +190,7 @@ func (s *azureBlobScaler) IsActive(ctx context.Context) (bool, error) {
 	)

 	if err != nil {
-		azureBlobLog.Error(err, "error)")
+		s.logger.Error(err, "error getting blob list length")
 		return false, err
 	}
@@ -221,7 +222,7 @@ func (s *azureBlobScaler) GetMetrics(ctx context.Context, metricName string, met
 	)

 	if err != nil {
-		azureBlobLog.Error(err, "error getting blob list length")
+		s.logger.Error(err, "error getting blob list length")
 		return []external_metrics.ExternalMetricValue{}, err
 	}
diff --git a/pkg/scalers/azure_blob_scaler_test.go b/pkg/scalers/azure_blob_scaler_test.go
index 1bc8a4236e0..7ae329c5c67 100644
--- a/pkg/scalers/azure_blob_scaler_test.go
+++ b/pkg/scalers/azure_blob_scaler_test.go
@@ -21,6 +21,8 @@ import (
 	"net/http"
 	"testing"

+	"github.com/go-logr/logr"
+
 	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
 )
@@ -110,7 +112,7 @@ var azBlobMetricIdentifiers = []azBlobMetricIdentifier{
 func TestAzBlobParseMetadata(t *testing.T) {
 	for _, testData := range testAzBlobMetadata {
 		_, podIdentity, err := parseAzureBlobMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testData.resolvedEnv,
-			AuthParams: testData.authParams, PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}})
+			AuthParams: testData.authParams, PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}}, logr.Logger{})
 		if err != nil && !testData.isError {
 			t.Error("Expected success but got error", err)
 		}
@@ -128,7 +130,7 @@ func TestAzBlobGetMetricSpecForScaling(t *testing.T) {
 		ctx := context.Background()
 		meta, podIdentity, err := parseAzureBlobMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata,
 			ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams,
-			PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex})
+			PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex}, logr.Logger{})
 		if err != nil {
 			t.Fatal("Could not parse metadata:", err)
 		}
diff --git a/pkg/scalers/azure_data_explorer_scaler.go b/pkg/scalers/azure_data_explorer_scaler.go
index 21eb28dc313..e792d38e8bb 100644
--- a/pkg/scalers/azure_data_explorer_scaler.go
+++ b/pkg/scalers/azure_data_explorer_scaler.go
@@ -22,10 +22,10 @@ import (
 	"strconv"

 	"github.com/Azure/azure-kusto-go/kusto"
+	"github.com/go-logr/logr"
 	"k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
 	"github.com/kedacore/keda/v2/pkg/scalers/azure"
@@ -42,15 +42,15 @@ type azureDataExplorerScaler struct {

 const adxName = "azure-data-explorer"

-var dataExplorerLogger = logf.Log.WithName("azure_data_explorer_scaler")
-
 func NewAzureDataExplorerScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) {
 	metricType, err := GetMetricTargetType(config)
 	if err != nil {
 		return nil, fmt.Errorf("error getting scaler metric type: %s", err)
 	}

-	metadata, err := parseAzureDataExplorerMetadata(config)
+	logger := InitializeLogger(config, "azure_data_explorer_scaler")
+
+	metadata, err := parseAzureDataExplorerMetadata(config, logger)
 	if err != nil {
 		return nil, fmt.Errorf("failed to parse azure data explorer metadata: %s", err)
 	}
@@ -64,13 +64,13 @@ func NewAzureDataExplorerScaler(ctx context.Context, config *ScalerConfig) (Scal
 		metricType: metricType,
 		metadata:   metadata,
 		client:     client,
-		name:       config.Name,
-		namespace:  config.Namespace,
+		name:       config.ScalableObjectName,
+		namespace:  config.ScalableObjectNamespace,
 	}, nil
 }

-func parseAzureDataExplorerMetadata(config *ScalerConfig) (*azure.DataExplorerMetadata, error) {
-	metadata, err := parseAzureDataExplorerAuthParams(config)
+func parseAzureDataExplorerMetadata(config *ScalerConfig, logger logr.Logger) (*azure.DataExplorerMetadata, error) {
+	metadata, err := parseAzureDataExplorerAuthParams(config, logger)
 	if err != nil {
 		return nil, err
 	}
@@ -124,7 +124,7 @@ func parseAzureDataExplorerMetadata(config *ScalerConfig) (*azure.DataExplorerMe
 	}
 	metadata.ActiveDirectoryEndpoint = activeDirectoryEndpoint

-	dataExplorerLogger.V(1).Info("Parsed azureDataExplorerMetadata",
+	logger.V(1).Info("Parsed azureDataExplorerMetadata",
 		"database", metadata.DatabaseName,
 		"endpoint", metadata.Endpoint,
 		"metricName", metadata.MetricName,
@@ -136,14 +136,14 @@ func parseAzureDataExplorerMetadata(config *ScalerConfig) (*azure.DataExplorerMe
 	return metadata, nil
 }

-func parseAzureDataExplorerAuthParams(config *ScalerConfig) (*azure.DataExplorerMetadata, error) {
+func parseAzureDataExplorerAuthParams(config *ScalerConfig, logger logr.Logger) (*azure.DataExplorerMetadata, error) {
 	metadata := azure.DataExplorerMetadata{}

 	switch config.PodIdentity.Provider {
 	case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload:
 		metadata.PodIdentity = config.PodIdentity
 	case "", kedav1alpha1.PodIdentityProviderNone:
-		dataExplorerLogger.V(1).Info("Pod Identity is not provided. Trying to resolve clientId, clientSecret and tenantId.")
+		logger.V(1).Info("Pod Identity is not provided. Trying to resolve clientId, clientSecret and tenantId.")

 		tenantID, err := getParameterFromConfig(config, "tenantId", true)
 		if err != nil {
diff --git a/pkg/scalers/azure_data_explorer_scaler_test.go b/pkg/scalers/azure_data_explorer_scaler_test.go
index 731a716be02..0a71975e526 100644
--- a/pkg/scalers/azure_data_explorer_scaler_test.go
+++ b/pkg/scalers/azure_data_explorer_scaler_test.go
@@ -21,6 +21,8 @@ import (
 	"fmt"
 	"testing"

+	"github.com/go-logr/logr"
+
 	kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1"
 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -111,7 +113,8 @@ func TestDataExplorerParseMetadata(t *testing.T) {
 			ResolvedEnv:     dataExplorerResolvedEnv,
 			TriggerMetadata: testData.metadata,
 			AuthParams:      map[string]string{},
-			PodIdentity:     kedav1alpha1.AuthPodIdentity{}})
+			PodIdentity:     kedav1alpha1.AuthPodIdentity{}},
+			logr.Logger{})

 		if err != nil && !testData.isError {
 			t.Error("Expected success but got error", err)
@@ -128,7 +131,7 @@ func TestDataExplorerParseMetadata(t *testing.T) {
 			ResolvedEnv:     dataExplorerResolvedEnv,
 			TriggerMetadata: testData.metadata,
 			AuthParams:      map[string]string{},
-			PodIdentity:     kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAzure}})
+			PodIdentity:     kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAzure}}, logr.Logger{})

 		if err != nil && !testData.isError {
 			t.Error("Expected success but got error", err)
@@ -145,7 +148,7 @@ func TestDataExplorerParseMetadata(t *testing.T) {
 			ResolvedEnv:     dataExplorerResolvedEnv,
 			TriggerMetadata: testData.metadata,
 			AuthParams:      map[string]string{},
-			PodIdentity:     kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAzureWorkload}})
+			PodIdentity:     kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderAzureWorkload}}, logr.Logger{})

 		if err != nil && !testData.isError {
 			t.Error("Expected success but got error", err)
@@ -164,7 +167,8 @@ func TestDataExplorerGetMetricSpecForScaling(t *testing.T) {
 			TriggerMetadata: testData.metadataTestData.metadata,
 			AuthParams:      map[string]string{},
 			PodIdentity:     kedav1alpha1.AuthPodIdentity{},
-			ScalerIndex:     testData.scalerIndex})
+			ScalerIndex:     testData.scalerIndex},
+			logr.Logger{})
 		if err != nil {
 			t.Error("Failed to parse metadata:", err)
 		}
diff --git a/pkg/scalers/azure_eventhub_scaler.go b/pkg/scalers/azure_eventhub_scaler.go
index 249e0481bfe..080f0ca173e 100644
--- a/pkg/scalers/azure_eventhub_scaler.go
+++ b/pkg/scalers/azure_eventhub_scaler.go
@@ -28,10 +28,10 @@ import (
 	eventhub "github.com/Azure/azure-event-hubs-go/v3"
 	"github.com/Azure/azure-storage-blob-go/azblob"
 	az "github.com/Azure/go-autorest/autorest/azure"
+	"github.com/go-logr/logr"
 	"k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"

 	"github.com/kedacore/keda/v2/apis/keda/v1alpha1"
 	"github.com/kedacore/keda/v2/pkg/scalers/azure"
@@ -48,13 +48,12 @@ const (
 	defaultCheckpointStrategy = ""
 )

-var eventhubLog = logf.Log.WithName("azure_eventhub_scaler")
-
 type azureEventHubScaler struct {
 	metricType v2beta2.MetricTargetType
 	metadata   *eventHubMetadata
 	client     *eventhub.Hub
 	httpClient *http.Client
+	logger     logr.Logger
 }

 type eventHubMetadata struct {
@@ -86,6 +85,7 @@ func NewAzureEventHubScaler(ctx context.Context, config *ScalerConfig) (Scaler,
 		metadata:   parsedMetadata,
 		client:     hub,
 		httpClient: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false),
+		logger:     InitializeLogger(config, "azure_eventhub_scaler"),
 	}, nil
 }
@@ -206,19 +206,19 @@ func parseAzureEventHubMetadata(config *ScalerConfig) (*eventHubMetadata, error)
 }

 // GetUnprocessedEventCountInPartition gets number of unprocessed events in a given partition
-func (scaler *azureEventHubScaler) GetUnprocessedEventCountInPartition(ctx context.Context, partitionInfo *eventhub.HubPartitionRuntimeInformation) (newEventCount int64, checkpoint azure.Checkpoint, err error) {
+func (s *azureEventHubScaler) GetUnprocessedEventCountInPartition(ctx context.Context, partitionInfo *eventhub.HubPartitionRuntimeInformation) (newEventCount int64, checkpoint azure.Checkpoint, err error) {
 	// if partitionInfo.LastEnqueuedOffset = -1, that means event hub partition is empty
 	if partitionInfo != nil && partitionInfo.LastEnqueuedOffset == "-1" {
 		return 0, azure.Checkpoint{}, nil
 	}

-	checkpoint, err = azure.GetCheckpointFromBlobStorage(ctx, scaler.httpClient, scaler.metadata.eventHubInfo, partitionInfo.PartitionID)
+	checkpoint, err = azure.GetCheckpointFromBlobStorage(ctx, s.httpClient, s.metadata.eventHubInfo, partitionInfo.PartitionID)
 	if err != nil {
 		// if blob not found return the total partition event count
 		err = errors.Unwrap(err)
 		if stErr, ok := err.(azblob.StorageError); ok {
 			if stErr.ServiceCode() == azblob.ServiceCodeBlobNotFound || stErr.ServiceCode() == azblob.ServiceCodeContainerNotFound {
-				eventhubLog.V(1).Error(err, fmt.Sprintf("Blob container : %s not found to use checkpoint strategy, getting unprocessed event count without checkpoint", scaler.metadata.eventHubInfo.BlobContainer))
+				s.logger.V(1).Error(err, fmt.Sprintf("Blob container : %s not found to use checkpoint strategy, getting unprocessed event count without checkpoint", s.metadata.eventHubInfo.BlobContainer))
 				return GetUnprocessedEventCountWithoutCheckpoint(partitionInfo), azure.Checkpoint{}, nil
 			}
 		}
@@ -267,10 +267,10 @@ func GetUnprocessedEventCountWithoutCheckpoint(partitionInfo *eventhub.HubPartit
 }

 // IsActive determines if eventhub is active based on number of unprocessed events
-func (scaler *azureEventHubScaler) IsActive(ctx context.Context) (bool, error) {
-	runtimeInfo, err := scaler.client.GetRuntimeInformation(ctx)
+func (s *azureEventHubScaler) IsActive(ctx context.Context) (bool, error) {
+	runtimeInfo, err := s.client.GetRuntimeInformation(ctx)
 	if err != nil {
-		eventhubLog.Error(err, "unable to get runtimeInfo for isActive")
+		s.logger.Error(err, "unable to get runtimeInfo for isActive")
 		return false, fmt.Errorf("unable to get runtimeInfo for isActive: %s", err)
 	}
@@ -279,18 +279,18 @@ func (scaler *azureEventHubScaler) IsActive(ctx context.Context) (bool, error) {
 	for i := 0; i < len(partitionIDs); i++ {
 		partitionID := partitionIDs[i]
-		partitionRuntimeInfo, err := scaler.client.GetPartitionInformation(ctx, partitionID)
+		partitionRuntimeInfo, err := s.client.GetPartitionInformation(ctx, partitionID)
 		if err != nil {
 			return false, fmt.Errorf("unable to get partitionRuntimeInfo for metrics: %s", err)
 		}

-		unprocessedEventCount, _, err := scaler.GetUnprocessedEventCountInPartition(ctx, partitionRuntimeInfo)
+		unprocessedEventCount, _, err := s.GetUnprocessedEventCountInPartition(ctx, partitionRuntimeInfo)

 		if err != nil {
 			return false, fmt.Errorf("unable to get unprocessedEventCount for isActive: %s", err)
 		}

-		if unprocessedEventCount > scaler.metadata.activationThreshold {
+		if unprocessedEventCount > s.metadata.activationThreshold {
 			return true, nil
 		}
 	}
@@ -299,21 +299,21 @@ func (scaler *azureEventHubScaler) IsActive(ctx context.Context) (bool, error) {
 }

 // GetMetricSpecForScaling returns
metric spec -func (scaler *azureEventHubScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec { +func (s *azureEventHubScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec { externalMetric := &v2beta2.ExternalMetricSource{ Metric: v2beta2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(scaler.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-eventhub-%s", scaler.metadata.eventHubInfo.EventHubConsumerGroup))), + Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("azure-eventhub-%s", s.metadata.eventHubInfo.EventHubConsumerGroup))), }, - Target: GetMetricTarget(scaler.metricType, scaler.metadata.threshold), + Target: GetMetricTarget(s.metricType, s.metadata.threshold), } metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: eventHubMetricType} return []v2beta2.MetricSpec{metricSpec} } // GetMetrics returns metric using total number of unprocessed events in event hub -func (scaler *azureEventHubScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { +func (s *azureEventHubScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { totalUnprocessedEventCount := int64(0) - runtimeInfo, err := scaler.client.GetRuntimeInformation(ctx) + runtimeInfo, err := s.client.GetRuntimeInformation(ctx) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("unable to get runtimeInfo for metrics: %s", err) } @@ -322,28 +322,28 @@ func (scaler *azureEventHubScaler) GetMetrics(ctx context.Context, metricName st for i := 0; i < len(partitionIDs); i++ { partitionID := partitionIDs[i] - partitionRuntimeInfo, err := scaler.client.GetPartitionInformation(ctx, partitionID) + partitionRuntimeInfo, err := s.client.GetPartitionInformation(ctx, partitionID) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("unable to get partitionRuntimeInfo for metrics: %s", err) } unprocessedEventCount := int64(0) - unprocessedEventCount, checkpoint, err := scaler.GetUnprocessedEventCountInPartition(ctx, partitionRuntimeInfo) + unprocessedEventCount, checkpoint, err := s.GetUnprocessedEventCountInPartition(ctx, partitionRuntimeInfo) if err != nil { return []external_metrics.ExternalMetricValue{}, fmt.Errorf("unable to get unprocessedEventCount for metrics: %s", err) } totalUnprocessedEventCount += unprocessedEventCount - eventhubLog.V(1).Info(fmt.Sprintf("Partition ID: %s, Last Enqueued Offset: %s, Checkpoint Offset: %s, Total new events in partition: %d", + s.logger.V(1).Info(fmt.Sprintf("Partition ID: %s, Last Enqueued Offset: %s, Checkpoint Offset: %s, Total new events in partition: %d", partitionRuntimeInfo.PartitionID, partitionRuntimeInfo.LastEnqueuedOffset, checkpoint.Offset, unprocessedEventCount)) } // don't scale out beyond the number of partitions - lagRelatedToPartitionCount := getTotalLagRelatedToPartitionAmount(totalUnprocessedEventCount, int64(len(partitionIDs)), scaler.metadata.threshold) + lagRelatedToPartitionCount := getTotalLagRelatedToPartitionAmount(totalUnprocessedEventCount, int64(len(partitionIDs)), s.metadata.threshold) - eventhubLog.V(1).Info(fmt.Sprintf("Unprocessed events in event hub total: %d, scaling for a lag of %d related to %d partitions", totalUnprocessedEventCount, lagRelatedToPartitionCount, len(partitionIDs))) + s.logger.V(1).Info(fmt.Sprintf("Unprocessed events in event hub 
total: %d, scaling for a lag of %d related to %d partitions", totalUnprocessedEventCount, lagRelatedToPartitionCount, len(partitionIDs))) metric := GenerateMetricInMili(metricName, float64(lagRelatedToPartitionCount)) @@ -359,11 +359,11 @@ func getTotalLagRelatedToPartitionAmount(unprocessedEventsCount int64, partition } // Close closes Azure Event Hub Scaler -func (scaler *azureEventHubScaler) Close(ctx context.Context) error { - if scaler.client != nil { - err := scaler.client.Close(ctx) +func (s *azureEventHubScaler) Close(ctx context.Context) error { + if s.client != nil { + err := s.client.Close(ctx) if err != nil { - eventhubLog.Error(err, "error closing azure event hub client") + s.logger.Error(err, "error closing azure event hub client") return err } } diff --git a/pkg/scalers/azure_log_analytics_scaler.go b/pkg/scalers/azure_log_analytics_scaler.go index 5f26d42e3b6..c61ccc6105b 100644 --- a/pkg/scalers/azure_log_analytics_scaler.go +++ b/pkg/scalers/azure_log_analytics_scaler.go @@ -32,10 +32,10 @@ import ( "time" "github.com/Azure/azure-amqp-common-go/v3/auth" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/scalers/azure" @@ -55,6 +55,7 @@ type azureLogAnalyticsScaler struct { name string namespace string httpClient *http.Client + logger logr.Logger } type azureLogAnalyticsMetadata struct { @@ -109,8 +110,6 @@ var tokenCache = struct { m map[string]tokenData }{m: make(map[string]tokenData)} -var logAnalyticsLog = logf.Log.WithName("azure_log_analytics_scaler") - var logAnalyticsResourceURLInCloud = map[string]string{ "AZUREPUBLICCLOUD": "https://api.loganalytics.io", "AZUREUSGOVERNMENTCLOUD": "https://api.loganalytics.us", @@ -126,16 +125,17 @@ func NewAzureLogAnalyticsScaler(config *ScalerConfig) (Scaler, error) { azureLogAnalyticsMetadata, err := parseAzureLogAnalyticsMetadata(config) if err != nil { - return nil, fmt.Errorf("failed to initialize Log Analytics scaler. Scaled object: %s. Namespace: %s. Inner Error: %v", config.Name, config.Namespace, err) + return nil, fmt.Errorf("failed to initialize Log Analytics scaler. Scaled object: %s. Namespace: %s. 
Inner Error: %v", config.ScalableObjectName, config.ScalableObjectNamespace, err) } return &azureLogAnalyticsScaler{ metricType: metricType, metadata: azureLogAnalyticsMetadata, cache: &sessionCache{metricValue: -1, metricThreshold: -1}, - name: config.Name, - namespace: config.Namespace, + name: config.ScalableObjectName, + namespace: config.ScalableObjectNamespace, httpClient: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false), + logger: InitializeLogger(config, "azure_log_analytics_scaler"), }, nil } @@ -268,7 +268,7 @@ func (s *azureLogAnalyticsScaler) GetMetricSpecForScaling(ctx context.Context) [ err := s.updateCache(ctx) if err != nil { - logAnalyticsLog.V(1).Info("failed to get metric spec.", "Scaled object", s.name, "Namespace", s.namespace, "Inner Error", err) + s.logger.V(1).Info("failed to get metric spec.", "Scaled object", s.name, "Namespace", s.namespace, "Inner Error", err) return nil } @@ -330,7 +330,7 @@ func (s *azureLogAnalyticsScaler) getMetricData(ctx context.Context) (metricsDat return metricsData{}, err } - logAnalyticsLog.V(1).Info("Providing metric value", "metrics value", metricsInfo.value, "scaler name", s.name, "namespace", s.namespace) + s.logger.V(1).Info("Providing metric value", "metrics value", metricsInfo.value, "scaler name", s.name, "namespace", s.namespace) return metricsInfo, nil } @@ -355,10 +355,10 @@ func (s *azureLogAnalyticsScaler) getAccessToken(ctx context.Context) (tokenData switch s.metadata.podIdentity.Provider { case "", kedav1alpha1.PodIdentityProviderNone: - logAnalyticsLog.V(1).Info("Token for Service Principal has been refreshed", "clientID", s.metadata.clientID, "scaler name", s.name, "namespace", s.namespace) + s.logger.V(1).Info("Token for Service Principal has been refreshed", "clientID", s.metadata.clientID, "scaler name", s.name, "namespace", s.namespace) _ = setTokenInCache(s.metadata.clientID, s.metadata.clientSecret, newTokenInfo) case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload: - logAnalyticsLog.V(1).Info("Token for Pod Identity has been refreshed", "type", s.metadata.podIdentity, "scaler name", s.name, "namespace", s.namespace) + s.logger.V(1).Info("Token for Pod Identity has been refreshed", "type", s.metadata.podIdentity, "scaler name", s.name, "namespace", s.namespace) _ = setTokenInCache(string(s.metadata.podIdentity.Provider), string(s.metadata.podIdentity.Provider), newTokenInfo) } @@ -384,10 +384,10 @@ func (s *azureLogAnalyticsScaler) executeQuery(ctx context.Context, query string switch s.metadata.podIdentity.Provider { case "", kedav1alpha1.PodIdentityProviderNone: - logAnalyticsLog.V(1).Info("Token for Service Principal has been refreshed", "clientID", s.metadata.clientID, "scaler name", s.name, "namespace", s.namespace) + s.logger.V(1).Info("Token for Service Principal has been refreshed", "clientID", s.metadata.clientID, "scaler name", s.name, "namespace", s.namespace) _ = setTokenInCache(s.metadata.clientID, s.metadata.clientSecret, tokenInfo) case kedav1alpha1.PodIdentityProviderAzure, kedav1alpha1.PodIdentityProviderAzureWorkload: - logAnalyticsLog.V(1).Info("Token for Pod Identity has been refreshed", "type", s.metadata.podIdentity, "scaler name", s.name, "namespace", s.namespace) + s.logger.V(1).Info("Token for Pod Identity has been refreshed", "type", s.metadata.podIdentity, "scaler name", s.name, "namespace", s.namespace) _ = setTokenInCache(string(s.metadata.podIdentity.Provider), string(s.metadata.podIdentity.Provider), tokenInfo) } @@ -492,7 +492,7 @@ 
func (s *azureLogAnalyticsScaler) refreshAccessToken(ctx context.Context) (token if currentTimeSec < tokenInfo.NotBefore { if currentTimeSec < tokenInfo.NotBefore+10 { sleepDurationSec := int(tokenInfo.NotBefore - currentTimeSec + 1) - logAnalyticsLog.V(1).Info("AAD token not ready", "delay (seconds)", sleepDurationSec, "scaler name", s.name, "namespace", s.namespace) + s.logger.V(1).Info("AAD token not ready", "delay (seconds)", sleepDurationSec, "scaler name", s.name, "namespace", s.namespace) time.Sleep(time.Duration(sleepDurationSec) * time.Second) } else { return tokenData{}, fmt.Errorf("error getting access token. Details: AAD token has been received, but start date begins in %d seconds, so current operation will be skipped", tokenInfo.NotBefore-currentTimeSec) diff --git a/pkg/scalers/azure_monitor_scaler.go b/pkg/scalers/azure_monitor_scaler.go index cf40bdc9aae..af16e54f5e3 100644 --- a/pkg/scalers/azure_monitor_scaler.go +++ b/pkg/scalers/azure_monitor_scaler.go @@ -23,10 +23,10 @@ import ( "strings" az "github.com/Azure/go-autorest/autorest/azure" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/scalers/azure" @@ -43,6 +43,7 @@ type azureMonitorScaler struct { metricType v2beta2.MetricTargetType metadata *azureMonitorMetadata podIdentity kedav1alpha1.AuthPodIdentity + logger logr.Logger } type azureMonitorMetadata struct { @@ -52,8 +53,6 @@ type azureMonitorMetadata struct { scalerIndex int } -var azureMonitorLog = logf.Log.WithName("azure_monitor_scaler") - // NewAzureMonitorScaler creates a new AzureMonitorScaler func NewAzureMonitorScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -61,7 +60,9 @@ func NewAzureMonitorScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - meta, err := parseAzureMonitorMetadata(config) + logger := InitializeLogger(config, "azure_monitor_scaler") + + meta, err := parseAzureMonitorMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing azure monitor metadata: %s", err) } @@ -70,10 +71,11 @@ func NewAzureMonitorScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, podIdentity: config.PodIdentity, + logger: logger, }, nil } -func parseAzureMonitorMetadata(config *ScalerConfig) (*azureMonitorMetadata, error) { +func parseAzureMonitorMetadata(config *ScalerConfig, logger logr.Logger) (*azureMonitorMetadata, error) { meta := azureMonitorMetadata{ azureMonitorInfo: azure.MonitorInfo{}, } @@ -81,7 +83,7 @@ func parseAzureMonitorMetadata(config *ScalerConfig) (*azureMonitorMetadata, err if val, ok := config.TriggerMetadata[targetValueName]; ok && val != "" { targetValue, err := strconv.ParseFloat(val, 64) if err != nil { - azureMonitorLog.Error(err, "Error parsing azure monitor metadata", "targetValue", targetValueName) + logger.Error(err, "Error parsing azure monitor metadata", "targetValue", targetValueName) return nil, fmt.Errorf("error parsing azure monitor metadata %s: %s", targetValueName, err.Error()) } meta.targetValue = targetValue @@ -92,7 +94,7 @@ func parseAzureMonitorMetadata(config *ScalerConfig) (*azureMonitorMetadata, err if val, ok := config.TriggerMetadata[activationTargetValueName]; ok && val != "" { activationTargetValue, err := 
strconv.ParseFloat(val, 64) if err != nil { - azureMonitorLog.Error(err, "Error parsing azure monitor metadata", "targetValue", activationTargetValueName) + logger.Error(err, "Error parsing azure monitor metadata", "activationTargetValue", activationTargetValueName) return nil, fmt.Errorf("error parsing azure monitor metadata %s: %s", activationTargetValueName, err.Error()) } meta.activationTargetValue = activationTargetValue @@ -216,7 +218,7 @@ func parseAzurePodIdentityParams(config *ScalerConfig) (clientID string, clientP func (s *azureMonitorScaler) IsActive(ctx context.Context) (bool, error) { val, err := azure.GetAzureMetricValue(ctx, s.metadata.azureMonitorInfo, s.podIdentity) if err != nil { - azureMonitorLog.Error(err, "error getting azure monitor metric") + s.logger.Error(err, "error getting azure monitor metric") return false, err } @@ -242,7 +244,7 @@ func (s *azureMonitorScaler) GetMetricSpecForScaling(context.Context) []v2beta2. func (s *azureMonitorScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { val, err := azure.GetAzureMetricValue(ctx, s.metadata.azureMonitorInfo, s.podIdentity) if err != nil { - azureMonitorLog.Error(err, "error getting azure monitor metric") + s.logger.Error(err, "error getting azure monitor metric") return []external_metrics.ExternalMetricValue{}, err } diff --git a/pkg/scalers/azure_monitor_scaler_test.go b/pkg/scalers/azure_monitor_scaler_test.go index 97fab370dcc..433672b328d 100644 --- a/pkg/scalers/azure_monitor_scaler_test.go +++ b/pkg/scalers/azure_monitor_scaler_test.go @@ -20,6 +20,8 @@ import ( "context" "testing" + "github.com/go-logr/logr" + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) @@ -110,7 +112,7 @@ var azMonitorMetricIdentifiers = []azMonitorMetricIdentifier{ func TestAzMonitorParseMetadata(t *testing.T) { for _, testData := range testParseAzMonitorMetadata { _, err := parseAzureMonitorMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testData.resolvedEnv, - AuthParams: testData.authParams, PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}}) + AuthParams: testData.authParams, PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}}, logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } @@ -124,11 +126,11 @@ func TestAzMonitorGetMetricSpecForScaling(t *testing.T) { for _, testData := range azMonitorMetricIdentifiers { meta, err := parseAzureMonitorMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, - PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex}) + PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } - mockAzMonitorScaler := azureMonitorScaler{"", meta, kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}} + mockAzMonitorScaler := azureMonitorScaler{"", meta, kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, logr.Logger{}} metricSpec := mockAzMonitorScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/azure_pipelines_scaler.go 
b/pkg/scalers/azure_pipelines_scaler.go index ce6d30d9e1b..995ab3d4804 100644 --- a/pkg/scalers/azure_pipelines_scaler.go +++ b/pkg/scalers/azure_pipelines_scaler.go @@ -9,10 +9,10 @@ import ( "strconv" "strings" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -35,6 +35,7 @@ type azurePipelinesScaler struct { metricType v2beta2.MetricTargetType metadata *azurePipelinesMetadata httpClient *http.Client + logger logr.Logger } type azurePipelinesMetadata struct { @@ -47,8 +48,6 @@ type azurePipelinesMetadata struct { scalerIndex int } -var azurePipelinesLog = logf.Log.WithName("azure_pipelines_scaler") - // NewAzurePipelinesScaler creates a new AzurePipelinesScaler func NewAzurePipelinesScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) { httpClient := kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false) @@ -67,6 +66,7 @@ func NewAzurePipelinesScaler(ctx context.Context, config *ScalerConfig) (Scaler, metricType: metricType, metadata: meta, httpClient: httpClient, + logger: InitializeLogger(config, "azure_pipelines_scaler"), }, nil } @@ -213,7 +213,7 @@ func (s *azurePipelinesScaler) GetMetrics(ctx context.Context, metricName string queuelen, err := s.GetAzurePipelinesQueueLength(ctx) if err != nil { - azurePipelinesLog.Error(err, "error getting pipelines queue length") + s.logger.Error(err, "error getting pipelines queue length") return []external_metrics.ExternalMetricValue{}, err } @@ -267,7 +267,7 @@ func (s *azurePipelinesScaler) IsActive(ctx context.Context) (bool, error) { queuelen, err := s.GetAzurePipelinesQueueLength(ctx) if err != nil { - azurePipelinesLog.Error(err, "error)") + s.logger.Error(err, "error getting pipelines queue length") return false, err } diff --git a/pkg/scalers/azure_queue_scaler.go b/pkg/scalers/azure_queue_scaler.go index 2db49fad9a0..79d2200ef45 100644 --- a/pkg/scalers/azure_queue_scaler.go +++ b/pkg/scalers/azure_queue_scaler.go @@ -22,10 +22,10 @@ import ( "net/http" "strconv" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/scalers/azure" @@ -44,6 +44,7 @@ type azureQueueScaler struct { metadata *azureQueueMetadata podIdentity kedav1alpha1.AuthPodIdentity httpClient *http.Client + logger logr.Logger } type azureQueueMetadata struct { @@ -56,8 +57,6 @@ type azureQueueMetadata struct { scalerIndex int } -var azureQueueLog = logf.Log.WithName("azure_queue_scaler") - // NewAzureQueueScaler creates a new scaler for queue func NewAzureQueueScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -65,7 +64,9 @@ func NewAzureQueueScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - meta, podIdentity, err := parseAzureQueueMetadata(config) + logger := InitializeLogger(config, "azure_queue_scaler") + + meta, podIdentity, err := parseAzureQueueMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing azure queue metadata: %s", err) } @@ -75,17 +76,18 @@ func NewAzureQueueScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, podIdentity: podIdentity, httpClient: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, 
false), + logger: logger, }, nil } -func parseAzureQueueMetadata(config *ScalerConfig) (*azureQueueMetadata, kedav1alpha1.AuthPodIdentity, error) { +func parseAzureQueueMetadata(config *ScalerConfig, logger logr.Logger) (*azureQueueMetadata, kedav1alpha1.AuthPodIdentity, error) { meta := azureQueueMetadata{} meta.targetQueueLength = defaultTargetQueueLength if val, ok := config.TriggerMetadata[queueLengthMetricName]; ok { queueLength, err := strconv.ParseInt(val, 10, 64) if err != nil { - azureQueueLog.Error(err, "Error parsing azure queue metadata", "queueLengthMetricName", queueLengthMetricName) + logger.Error(err, "Error parsing azure queue metadata", "queueLengthMetricName", queueLengthMetricName) return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("error parsing azure queue metadata %s: %s", queueLengthMetricName, err.Error()) } @@ -97,7 +99,7 @@ func parseAzureQueueMetadata(config *ScalerConfig) (*azureQueueMetadata, kedav1a if val, ok := config.TriggerMetadata[activationQueueLengthMetricName]; ok { activationQueueLength, err := strconv.ParseInt(val, 10, 64) if err != nil { - azureQueueLog.Error(err, "Error parsing azure queue metadata", activationQueueLengthMetricName, activationQueueLengthMetricName) + logger.Error(err, "Error parsing azure queue metadata", "activationQueueLengthMetricName", activationQueueLengthMetricName) return nil, kedav1alpha1.AuthPodIdentity{}, fmt.Errorf("error parsing azure queue metadata %s: %s", activationQueueLengthMetricName, err.Error()) } @@ -170,7 +172,7 @@ func (s *azureQueueScaler) IsActive(ctx context.Context) (bool, error) { ) if err != nil { - azureQueueLog.Error(err, "error)") + s.logger.Error(err, "error getting queue length") return false, err } @@ -205,7 +207,7 @@ func (s *azureQueueScaler) GetMetrics(ctx context.Context, metricName string, me ) if err != nil { - azureQueueLog.Error(err, "error getting queue length") + s.logger.Error(err, "error getting queue length") return []external_metrics.ExternalMetricValue{}, err } diff --git a/pkg/scalers/azure_queue_scaler_test.go b/pkg/scalers/azure_queue_scaler_test.go index bc06a084aca..76431bd4275 100644 --- a/pkg/scalers/azure_queue_scaler_test.go +++ b/pkg/scalers/azure_queue_scaler_test.go @@ -21,6 +21,8 @@ import ( "net/http" "testing" + "github.com/go-logr/logr" + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) @@ -104,7 +106,8 @@ func TestAzQueueParseMetadata(t *testing.T) { for _, testData := range testAzQueueMetadata { _, podIdentity, err := parseAzureQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ResolvedEnv: testData.resolvedEnv, AuthParams: testData.authParams, - PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}}) + PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}}, + logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } @@ -121,7 +124,8 @@ func TestAzQueueGetMetricSpecForScaling(t *testing.T) { for _, testData := range azQueueMetricIdentifiers { meta, podIdentity, err := parseAzureQueueMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testData.metadataTestData.resolvedEnv, AuthParams: testData.metadataTestData.authParams, - PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex}) + PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex}, + logr.Logger{}) if err != nil { t.Fatal("Could not parse 
metadata:", err) } diff --git a/pkg/scalers/azure_servicebus_scaler.go b/pkg/scalers/azure_servicebus_scaler.go index 23346db3136..21e602c12a4 100755 --- a/pkg/scalers/azure_servicebus_scaler.go +++ b/pkg/scalers/azure_servicebus_scaler.go @@ -25,10 +25,10 @@ import ( "github.com/Azure/azure-amqp-common-go/v3/auth" servicebus "github.com/Azure/azure-service-bus-go" az "github.com/Azure/go-autorest/autorest/azure" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/scalers/azure" @@ -48,14 +48,13 @@ const ( serviceBusResource = "https://servicebus.azure.net/" ) -var azureServiceBusLog = logf.Log.WithName("azure_servicebus_scaler") - type azureServiceBusScaler struct { ctx context.Context metricType v2beta2.MetricTargetType metadata *azureServiceBusMetadata podIdentity kedav1alpha1.AuthPodIdentity httpClient *http.Client + logger logr.Logger } type azureServiceBusMetadata struct { @@ -78,7 +77,9 @@ func NewAzureServiceBusScaler(ctx context.Context, config *ScalerConfig) (Scaler return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - meta, err := parseAzureServiceBusMetadata(config) + logger := InitializeLogger(config, "azure_servicebus_scaler") + + meta, err := parseAzureServiceBusMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing azure service bus metadata: %s", err) } @@ -89,11 +90,12 @@ func NewAzureServiceBusScaler(ctx context.Context, config *ScalerConfig) (Scaler metadata: meta, podIdentity: config.PodIdentity, httpClient: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false), + logger: logger, }, nil } // Creates an azureServiceBusMetadata struct from input metadata/env variables -func parseAzureServiceBusMetadata(config *ScalerConfig) (*azureServiceBusMetadata, error) { +func parseAzureServiceBusMetadata(config *ScalerConfig, logger logr.Logger) (*azureServiceBusMetadata, error) { meta := azureServiceBusMetadata{} meta.entityType = none meta.targetLength = defaultTargetMessageCount @@ -102,7 +104,7 @@ func parseAzureServiceBusMetadata(config *ScalerConfig) (*azureServiceBusMetadat if val, ok := config.TriggerMetadata[messageCountMetricName]; ok { messageCount, err := strconv.ParseInt(val, 10, 64) if err != nil { - azureServiceBusLog.Error(err, "Error parsing azure queue metadata", "messageCount", messageCountMetricName) + logger.Error(err, "Error parsing azure queue metadata", "messageCount", messageCountMetricName) } else { meta.targetLength = messageCount } @@ -112,7 +114,7 @@ func parseAzureServiceBusMetadata(config *ScalerConfig) (*azureServiceBusMetadat if val, ok := config.TriggerMetadata[activationMessageCountMetricName]; ok { activationMessageCount, err := strconv.ParseInt(val, 10, 64) if err != nil { - azureServiceBusLog.Error(err, "Error parsing azure queue metadata", activationMessageCountMetricName, activationMessageCountMetricName) + logger.Error(err, "Error parsing azure queue metadata", activationMessageCountMetricName, activationMessageCountMetricName) return nil, fmt.Errorf("error parsing azure queue metadata %s", activationMessageCountMetricName) } meta.activationTargetLength = activationMessageCount @@ -186,7 +188,7 @@ func parseAzureServiceBusMetadata(config *ScalerConfig) (*azureServiceBusMetadat func (s *azureServiceBusScaler) IsActive(ctx context.Context) (bool, error) { length, 
err := s.getAzureServiceBusLength(ctx) if err != nil { - azureServiceBusLog.Error(err, "error") + s.logger.Error(err, "error getting service bus entity length") return false, err } @@ -222,7 +224,7 @@ func (s *azureServiceBusScaler) GetMetrics(ctx context.Context, metricName strin queuelen, err := s.getAzureServiceBusLength(ctx) if err != nil { - azureServiceBusLog.Error(err, "error getting service bus entity length") + s.logger.Error(err, "error getting service bus entity length") return []external_metrics.ExternalMetricValue{}, err } diff --git a/pkg/scalers/azure_servicebus_scaler_test.go b/pkg/scalers/azure_servicebus_scaler_test.go index 3b72ea283c2..55a91a7340e 100755 --- a/pkg/scalers/azure_servicebus_scaler_test.go +++ b/pkg/scalers/azure_servicebus_scaler_test.go @@ -23,6 +23,8 @@ import ( "testing" "time" + "github.com/go-logr/logr" + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) @@ -154,7 +156,8 @@ func TestParseServiceBusMetadata(t *testing.T) { for _, testData := range parseServiceBusMetadataDataset { meta, err := parseAzureServiceBusMetadata(&ScalerConfig{ResolvedEnv: sampleResolvedEnv, TriggerMetadata: testData.metadata, AuthParams: testData.authParams, - PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}, ScalerIndex: 0}) + PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.podIdentity}, ScalerIndex: 0}, + logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) @@ -209,7 +212,8 @@ func TestAzServiceBusGetMetricSpecForScaling(t *testing.T) { for _, testData := range azServiceBusMetricIdentifiers { meta, err := parseAzureServiceBusMetadata(&ScalerConfig{ResolvedEnv: connectionResolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, - PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex}) + PodIdentity: kedav1alpha1.AuthPodIdentity{Provider: testData.metadataTestData.podIdentity}, ScalerIndex: testData.scalerIndex}, + logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } diff --git a/pkg/scalers/cassandra_scaler.go b/pkg/scalers/cassandra_scaler.go index 85351ac06bd..edc197fc22d 100644 --- a/pkg/scalers/cassandra_scaler.go +++ b/pkg/scalers/cassandra_scaler.go @@ -6,11 +6,11 @@ import ( "strconv" "strings" + "github.com/go-logr/logr" "github.com/gocql/gocql" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -20,6 +20,7 @@ type cassandraScaler struct { metricType v2beta2.MetricTargetType metadata *CassandraMetadata session *gocql.Session + logger logr.Logger } // CassandraMetadata defines metadata used by KEDA to query a Cassandra table. @@ -38,8 +39,6 @@ type CassandraMetadata struct { scalerIndex int } -var cassandraLog = logf.Log.WithName("cassandra_scaler") - // NewCassandraScaler creates a new Cassandra scaler. 
func NewCassandraScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -47,12 +46,14 @@ func NewCassandraScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - meta, err := ParseCassandraMetadata(config) + logger := InitializeLogger(config, "cassandra_scaler") + + meta, err := parseCassandraMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing cassandra metadata: %s", err) } - session, err := NewCassandraSession(meta) + session, err := newCassandraSession(meta, logger) if err != nil { return nil, fmt.Errorf("error establishing cassandra session: %s", err) } @@ -61,11 +62,12 @@ func NewCassandraScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, session: session, + logger: logger, }, nil } -// ParseCassandraMetadata parses the metadata and returns a CassandraMetadata or an error if the ScalerConfig is invalid. -func ParseCassandraMetadata(config *ScalerConfig) (*CassandraMetadata, error) { +// parseCassandraMetadata parses the metadata and returns a CassandraMetadata or an error if the ScalerConfig is invalid. +func parseCassandraMetadata(config *ScalerConfig) (*CassandraMetadata, error) { meta := CassandraMetadata{} if val, ok := config.TriggerMetadata["query"]; ok { @@ -159,8 +161,8 @@ func ParseCassandraMetadata(config *ScalerConfig) (*CassandraMetadata, error) { return &meta, nil } -// NewCassandraSession returns a new Cassandra session for the provided CassandraMetadata. -func NewCassandraSession(meta *CassandraMetadata) (*gocql.Session, error) { +// newCassandraSession returns a new Cassandra session for the provided CassandraMetadata. +func newCassandraSession(meta *CassandraMetadata, logger logr.Logger) (*gocql.Session, error) { cluster := gocql.NewCluster(meta.clusterIPAddress) cluster.ProtoVersion = meta.protocolVersion cluster.Consistency = meta.consistency @@ -171,7 +173,7 @@ func NewCassandraSession(meta *CassandraMetadata) (*gocql.Session, error) { session, err := cluster.CreateSession() if err != nil { - cassandraLog.Error(err, "found error creating session") + logger.Error(err, "found error creating session") return nil, err } @@ -220,7 +222,7 @@ func (s *cassandraScaler) GetQueryResult(ctx context.Context) (int64, error) { var value int64 if err := s.session.Query(s.metadata.query).WithContext(ctx).Scan(&value); err != nil { if err != gocql.ErrNotFound { - cassandraLog.Error(err, "query failed") + s.logger.Error(err, "query failed") return 0, err } } diff --git a/pkg/scalers/cassandra_scaler_test.go b/pkg/scalers/cassandra_scaler_test.go index d50c0d209a8..5719fa94bca 100644 --- a/pkg/scalers/cassandra_scaler_test.go +++ b/pkg/scalers/cassandra_scaler_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr" "github.com/gocql/gocql" ) @@ -50,7 +51,7 @@ var cassandraMetricIdentifiers = []cassandraMetricIdentifier{ func TestCassandraParseMetadata(t *testing.T) { testCaseNum := 1 for _, testData := range testCassandraMetadata { - _, err := ParseCassandraMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) + _, err := parseCassandraMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) if err != nil && !testData.isError { t.Errorf("Expected success but got error for unit test # %v", testCaseNum) } @@ -63,13 +64,13 @@ func TestCassandraParseMetadata(t *testing.T) { func TestCassandraGetMetricSpecForScaling(t *testing.T) { 
for _, testData := range cassandraMetricIdentifiers { - meta, err := ParseCassandraMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex, AuthParams: testData.metadataTestData.authParams}) + meta, err := parseCassandraMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ScalerIndex: testData.scalerIndex, AuthParams: testData.metadataTestData.authParams}) if err != nil { t.Fatal("Could not parse metadata:", err) } cluster := gocql.NewCluster(meta.clusterIPAddress) session, _ := cluster.CreateSession() - mockCassandraScaler := cassandraScaler{"", meta, session} + mockCassandraScaler := cassandraScaler{"", meta, session, logr.Logger{}} metricSpec := mockCassandraScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/cpu_memory_scaler.go b/pkg/scalers/cpu_memory_scaler.go index b13b5cf7c14..edaa2af48ea 100644 --- a/pkg/scalers/cpu_memory_scaler.go +++ b/pkg/scalers/cpu_memory_scaler.go @@ -5,12 +5,12 @@ import ( "fmt" "strconv" + "github.com/go-logr/logr" "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" ) type cpuMemoryScaler struct { @@ -24,11 +24,11 @@ type cpuMemoryMetadata struct { AverageUtilization *int32 } -var cpuMemoryLog = logf.Log.WithName("cpu_memory_scaler") - // NewCPUMemoryScaler creates a new cpuMemoryScaler func NewCPUMemoryScaler(resourceName v1.ResourceName, config *ScalerConfig) (Scaler, error) { - meta, parseErr := parseResourceMetadata(config) + logger := InitializeLogger(config, "cpu_memory_scaler") + + meta, parseErr := parseResourceMetadata(config, logger) if parseErr != nil { return nil, fmt.Errorf("error parsing %s metadata: %s", resourceName, parseErr) } @@ -39,7 +39,7 @@ func NewCPUMemoryScaler(resourceName v1.ResourceName, config *ScalerConfig) (Sca }, nil } -func parseResourceMetadata(config *ScalerConfig) (*cpuMemoryMetadata, error) { +func parseResourceMetadata(config *ScalerConfig, logger logr.Logger) (*cpuMemoryMetadata, error) { meta := &cpuMemoryMetadata{} var value string var ok bool @@ -48,7 +48,7 @@ func parseResourceMetadata(config *ScalerConfig) (*cpuMemoryMetadata, error) { case ok && value != "" && config.MetricType != "": return nil, fmt.Errorf("only one of trigger.metadata.type or trigger.metricType should be defined") case ok && value != "": - cpuMemoryLog.V(0).Info("trigger.metadata.type is deprecated in favor of trigger.metricType") + logger.V(0).Info("trigger.metadata.type is deprecated in favor of trigger.metricType") meta.Type = v2beta2.MetricTargetType(value) case config.MetricType != "": meta.Type = config.MetricType diff --git a/pkg/scalers/cpu_memory_scaler_test.go b/pkg/scalers/cpu_memory_scaler_test.go index 212c5350744..b07d7e55473 100644 --- a/pkg/scalers/cpu_memory_scaler_test.go +++ b/pkg/scalers/cpu_memory_scaler_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr" "github.com/stretchr/testify/assert" "k8s.io/api/autoscaling/v2beta2" v1 "k8s.io/api/core/v1" @@ -40,7 +41,7 @@ func TestCPUMemoryParseMetadata(t *testing.T) { TriggerMetadata: testData.metadata, MetricType: testData.metricType, } - _, err := parseResourceMetadata(config) + _, err := parseResourceMetadata(config, logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } diff --git 
a/pkg/scalers/cron_scaler.go b/pkg/scalers/cron_scaler.go index e438982c042..13826504019 100644 --- a/pkg/scalers/cron_scaler.go +++ b/pkg/scalers/cron_scaler.go @@ -7,11 +7,11 @@ import ( "strings" "time" + "github.com/go-logr/logr" "github.com/robfig/cron/v3" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -24,6 +24,7 @@ const ( type cronScaler struct { metricType v2beta2.MetricTargetType metadata *cronMetadata + logger logr.Logger } type cronMetadata struct { @@ -34,8 +35,6 @@ type cronMetadata struct { scalerIndex int } -var cronLog = logf.Log.WithName("cron_scaler") - // NewCronScaler creates a new cronScaler func NewCronScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -51,6 +50,7 @@ func NewCronScaler(config *ScalerConfig) (Scaler, error) { return &cronScaler{ metricType: metricType, metadata: meta, + logger: InitializeLogger(config, "cron_scaler"), }, nil } @@ -174,7 +174,7 @@ func (s *cronScaler) GetMetrics(ctx context.Context, metricName string, metricSe var currentReplicas = int64(defaultDesiredReplicas) isActive, err := s.IsActive(ctx) if err != nil { - cronLog.Error(err, "error") + s.logger.Error(err, "error checking cron scaler active status") return []external_metrics.ExternalMetricValue{}, err } if isActive { diff --git a/pkg/scalers/cron_scaler_test.go b/pkg/scalers/cron_scaler_test.go index 76723b01a02..097cfff6070 100644 --- a/pkg/scalers/cron_scaler_test.go +++ b/pkg/scalers/cron_scaler_test.go @@ -5,6 +5,7 @@ import ( "testing" "time" + "github.com/go-logr/logr" "github.com/stretchr/testify/assert" ) @@ -118,7 +119,7 @@ func TestCronGetMetricSpecForScaling(t *testing.T) { if err != nil { t.Fatal("Could not parse metadata:", err) } - mockCronScaler := cronScaler{"", meta} + mockCronScaler := cronScaler{"", meta, logr.Logger{}} metricSpec := mockCronScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/datadog_scaler.go b/pkg/scalers/datadog_scaler.go index 5b57e52f504..559e3f47198 100644 --- a/pkg/scalers/datadog_scaler.go +++ b/pkg/scalers/datadog_scaler.go @@ -9,10 +9,10 @@ import ( "time" datadog "github.com/DataDog/datadog-api-client-go/api/v1/datadog" + "github.com/go-logr/logr" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -20,6 +20,7 @@ import ( type datadogScaler struct { metadata *datadogMetadata apiClient *datadog.APIClient + logger logr.Logger } type datadogMetadata struct { @@ -36,8 +37,6 @@ type datadogMetadata struct { fillValue float64 } -var datadogLog = logf.Log.WithName("datadog_scaler") - var filter *regexp.Regexp func init() { @@ -46,7 +45,9 @@ func init() { // NewDatadogScaler creates a new Datadog scaler func NewDatadogScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) { - meta, err := parseDatadogMetadata(config) + logger := InitializeLogger(config, "datadog_scaler") + + meta, err := parseDatadogMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing Datadog metadata: %s", err) } @@ -58,6 +59,7 @@ func NewDatadogScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) return &datadogScaler{ metadata: meta, apiClient: apiClient, + logger: logger, }, nil } @@ -71,7 +73,7 @@ func parseDatadogQuery(q 
string) (bool, error) { return true, nil } -func parseDatadogMetadata(config *ScalerConfig) (*datadogMetadata, error) { +func parseDatadogMetadata(config *ScalerConfig, logger logr.Logger) (*datadogMetadata, error) { meta := datadogMetadata{} if val, ok := config.TriggerMetadata["age"]; ok { @@ -82,7 +84,7 @@ func parseDatadogMetadata(config *ScalerConfig) (*datadogMetadata, error) { meta.age = age if age < 60 { - datadogLog.Info("selecting a window smaller than 60 seconds can cause Datadog not finding a metric value for the query") + logger.Info("selecting a window smaller than 60 seconds can cause Datadog not finding a metric value for the query") } } else { meta.age = 90 // Default window 90 seconds @@ -128,7 +130,7 @@ func parseDatadogMetadata(config *ScalerConfig) (*datadogMetadata, error) { } if val, ok := config.TriggerMetadata["type"]; ok { - datadogLog.V(0).Info("trigger.metadata.type is deprecated in favor of trigger.metricType") + logger.V(0).Info("trigger.metadata.type is deprecated in favor of trigger.metricType") if config.MetricType != "" { return nil, fmt.Errorf("only one of trigger.metadata.type or trigger.metricType should be defined") } @@ -313,7 +315,7 @@ func (s *datadogScaler) GetMetricSpecForScaling(context.Context) []v2beta2.Metri func (s *datadogScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { num, err := s.getQueryResult(ctx) if err != nil { - datadogLog.Error(err, "error getting metrics from Datadog") + s.logger.Error(err, "error getting metrics from Datadog") return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error getting metrics from Datadog: %s", err) } diff --git a/pkg/scalers/datadog_scaler_test.go b/pkg/scalers/datadog_scaler_test.go index 8a5f07b1fe6..af0caa8a34f 100644 --- a/pkg/scalers/datadog_scaler_test.go +++ b/pkg/scalers/datadog_scaler_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" ) @@ -96,7 +97,7 @@ var testDatadogMetadata = []datadogAuthMetadataTestData{ func TestDatadogScalerAuthParams(t *testing.T) { for _, testData := range testDatadogMetadata { - _, err := parseDatadogMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams, MetricType: testData.metricType}) + _, err := parseDatadogMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams, MetricType: testData.metricType}, logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) @@ -114,7 +115,7 @@ var datadogMetricIdentifiers = []datadogMetricIdentifier{ func TestDatadogGetMetricSpecForScaling(t *testing.T) { for _, testData := range datadogMetricIdentifiers { - meta, err := parseDatadogMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex, MetricType: testData.metadataTestData.metricType}) + meta, err := parseDatadogMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex, MetricType: testData.metadataTestData.metricType}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } diff --git a/pkg/scalers/elasticsearch_scaler.go b/pkg/scalers/elasticsearch_scaler.go index 6bb7d7f9036..2e8c562268a 100644 --- a/pkg/scalers/elasticsearch_scaler.go +++ b/pkg/scalers/elasticsearch_scaler.go @@ 
-12,11 +12,11 @@ import ( "strings" "github.com/elastic/go-elasticsearch/v7" + "github.com/go-logr/logr" "github.com/tidwall/gjson" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -25,6 +25,7 @@ type elasticsearchScaler struct { metricType v2beta2.MetricTargetType metadata *elasticsearchMetadata esClient *elasticsearch.Client + logger logr.Logger } type elasticsearchMetadata struct { @@ -41,8 +42,6 @@ type elasticsearchMetadata struct { metricName string } -var elasticsearchLog = logf.Log.WithName("elasticsearch_scaler") - // NewElasticsearchScaler creates a new elasticsearch scaler func NewElasticsearchScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -50,12 +49,14 @@ func NewElasticsearchScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } + logger := InitializeLogger(config, "elasticsearch_scaler") + meta, err := parseElasticsearchMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing elasticsearch metadata: %s", err) } - esClient, err := newElasticsearchClient(meta) + esClient, err := newElasticsearchClient(meta, logger) if err != nil { return nil, fmt.Errorf("error getting elasticsearch client: %s", err) } @@ -63,6 +64,7 @@ func NewElasticsearchScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, esClient: esClient, + logger: logger, }, nil } @@ -141,7 +143,7 @@ func parseElasticsearchMetadata(config *ScalerConfig) (*elasticsearchMetadata, e } // newElasticsearchClient creates elasticsearch db connection -func newElasticsearchClient(meta *elasticsearchMetadata) (*elasticsearch.Client, error) { +func newElasticsearchClient(meta *elasticsearchMetadata, logger logr.Logger) (*elasticsearch.Client, error) { config := elasticsearch.Config{Addresses: meta.addresses} if meta.username != "" { config.Username = meta.username @@ -156,13 +158,13 @@ func newElasticsearchClient(meta *elasticsearchMetadata) (*elasticsearch.Client, esClient, err := elasticsearch.NewClient(config) if err != nil { - elasticsearchLog.Error(err, fmt.Sprintf("Found error when creating client: %s", err)) + logger.Error(err, fmt.Sprintf("Found error when creating client: %s", err)) return nil, err } _, err = esClient.Info() if err != nil { - elasticsearchLog.Error(err, fmt.Sprintf("Found error when pinging search engine: %s", err)) + logger.Error(err, fmt.Sprintf("Found error when pinging search engine: %s", err)) return nil, err } return esClient, nil @@ -176,7 +178,7 @@ func (s *elasticsearchScaler) Close(ctx context.Context) error { func (s *elasticsearchScaler) IsActive(ctx context.Context) (bool, error) { messages, err := s.getQueryResult(ctx) if err != nil { - elasticsearchLog.Error(err, fmt.Sprintf("Error inspecting elasticsearch: %s", err)) + s.logger.Error(err, fmt.Sprintf("Error inspecting elasticsearch: %s", err)) return false, err } return messages > s.metadata.activationTargetValue, nil @@ -187,7 +189,7 @@ func (s *elasticsearchScaler) getQueryResult(ctx context.Context) (float64, erro // Build the request body. 
var body bytes.Buffer if err := json.NewEncoder(&body).Encode(buildQuery(s.metadata)); err != nil { - elasticsearchLog.Error(err, "Error encoding query: %s", err) + s.logger.Error(err, fmt.Sprintf("Error encoding query: %s", err)) } // Run the templated search @@ -197,7 +199,7 @@ func (s *elasticsearchScaler) getQueryResult(ctx context.Context) (float64, erro s.esClient.SearchTemplate.WithContext(ctx), ) if err != nil { - elasticsearchLog.Error(err, fmt.Sprintf("Could not query elasticsearch: %s", err)) + s.logger.Error(err, fmt.Sprintf("Could not query elasticsearch: %s", err)) return 0, err } diff --git a/pkg/scalers/external_scaler.go b/pkg/scalers/external_scaler.go index 6109e7724a3..22a205b262b 100644 --- a/pkg/scalers/external_scaler.go +++ b/pkg/scalers/external_scaler.go @@ -7,6 +7,7 @@ import ( "sync" "time" + "github.com/go-logr/logr" "github.com/mitchellh/hashstructure" "google.golang.org/grpc" "google.golang.org/grpc/connectivity" @@ -15,7 +16,6 @@ import ( v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" pb "github.com/kedacore/keda/v2/pkg/scalers/externalscaler" ) @@ -24,6 +24,7 @@ type externalScaler struct { metricType v2beta2.MetricTargetType metadata externalScalerMetadata scaledObjectRef pb.ScaledObjectRef + logger logr.Logger } type externalPushScaler struct { @@ -44,8 +45,6 @@ type connectionGroup struct { // a pool of connectionGroup per metadata hash var connectionPool sync.Map -var externalLog = logf.Log.WithName("external_scaler") - // NewExternalScaler creates a new external scaler - calls the GRPC interface // to create a new scaler func NewExternalScaler(config *ScalerConfig) (Scaler, error) { @@ -63,10 +62,11 @@ func NewExternalScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, scaledObjectRef: pb.ScaledObjectRef{ - Name: config.Name, - Namespace: config.Namespace, + Name: config.ScalableObjectName, + Namespace: config.ScalableObjectNamespace, ScalerMetadata: meta.originalMetadata, }, + logger: InitializeLogger(config, "external_scaler"), }, nil } @@ -87,8 +87,8 @@ func NewExternalPushScaler(config *ScalerConfig) (PushScaler, error) { metricType: metricType, metadata: meta, scaledObjectRef: pb.ScaledObjectRef{ - Name: config.Name, - Namespace: config.Namespace, + Name: config.ScalableObjectName, + Namespace: config.ScalableObjectNamespace, ScalerMetadata: meta.originalMetadata, }, }, @@ -137,7 +137,7 @@ func (s *externalScaler) IsActive(ctx context.Context) (bool, error) { response, err := grpcClient.IsActive(ctx, &s.scaledObjectRef) if err != nil { - externalLog.Error(err, "error calling IsActive on external scaler") + s.logger.Error(err, "error calling IsActive on external scaler") return false, err } @@ -154,13 +154,13 @@ func (s *externalScaler) GetMetricSpecForScaling(ctx context.Context) []v2beta2. 
grpcClient, err := getClientForConnectionPool(s.metadata) if err != nil { - externalLog.Error(err, "error building grpc connection") + s.logger.Error(err, "error building grpc connection") return result } response, err := grpcClient.GetMetricSpec(ctx, &s.scaledObjectRef) if err != nil { - externalLog.Error(err, "error") + s.logger.Error(err, "error") return nil } @@ -205,7 +205,7 @@ func (s *externalScaler) GetMetrics(ctx context.Context, metricName string, metr response, err := grpcClient.GetMetrics(ctx, request) if err != nil { - externalLog.Error(err, "error") + s.logger.Error(err, "error") return []external_metrics.ExternalMetricValue{}, err } @@ -224,11 +224,11 @@ func (s *externalPushScaler) Run(ctx context.Context, active chan<- bool) { runWithLog := func() { grpcClient, err := getClientForConnectionPool(s.metadata) if err != nil { - externalLog.Error(err, "error running internalRun") + s.logger.Error(err, "error running internalRun") return } if err := handleIsActiveStream(ctx, s.scaledObjectRef, grpcClient, active); err != nil { - externalLog.Error(err, "error running internalRun") + s.logger.Error(err, "error running internalRun") return } } diff --git a/pkg/scalers/external_scaler_test.go b/pkg/scalers/external_scaler_test.go index 4d97f08d455..dde0e680281 100644 --- a/pkg/scalers/external_scaler_test.go +++ b/pkg/scalers/external_scaler_test.go @@ -55,7 +55,7 @@ func TestExternalPushScaler_Run(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) for i := 0; i < serverCount*iterationCount; i++ { id := i % serverCount - pushScaler, _ := NewExternalPushScaler(&ScalerConfig{Name: "app", Namespace: "namespace", TriggerMetadata: map[string]string{"scalerAddress": servers[id].address}, ResolvedEnv: map[string]string{}}) + pushScaler, _ := NewExternalPushScaler(&ScalerConfig{ScalableObjectName: "app", ScalableObjectNamespace: "namespace", TriggerMetadata: map[string]string{"scalerAddress": servers[id].address}, ResolvedEnv: map[string]string{}}) go pushScaler.Run(ctx, replyCh[i]) } diff --git a/pkg/scalers/gcp_pubsub_scaler.go b/pkg/scalers/gcp_pubsub_scaler.go index 5c4338f7087..48393792a68 100644 --- a/pkg/scalers/gcp_pubsub_scaler.go +++ b/pkg/scalers/gcp_pubsub_scaler.go @@ -8,10 +8,10 @@ import ( "strconv" "strings" + "github.com/go-logr/logr" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -33,6 +33,7 @@ type pubsubScaler struct { client *StackDriverClient metricType v2beta2.MetricTargetType metadata *pubsubMetadata + logger logr.Logger } type pubsubMetadata struct { @@ -45,8 +46,6 @@ type pubsubMetadata struct { scalerIndex int } -var gcpPubSubLog = logf.Log.WithName("gcp_pub_sub_scaler") - // NewPubSubScaler creates a new pubsubScaler func NewPubSubScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -54,7 +53,9 @@ func NewPubSubScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - meta, err := parsePubSubMetadata(config) + logger := InitializeLogger(config, "gcp_pub_sub_scaler") + + meta, err := parsePubSubMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing PubSub metadata: %s", err) } @@ -62,10 +63,11 @@ func NewPubSubScaler(config *ScalerConfig) (Scaler, error) { return &pubsubScaler{ metricType: metricType, metadata: meta, + logger: logger, }, nil } -func 
parsePubSubMetadata(config *ScalerConfig) (*pubsubMetadata, error) { +func parsePubSubMetadata(config *ScalerConfig, logger logr.Logger) (*pubsubMetadata, error) { meta := pubsubMetadata{} meta.mode = pubsubModeSubscriptionSize @@ -76,7 +78,7 @@ func parsePubSubMetadata(config *ScalerConfig) (*pubsubMetadata, error) { if modePresent || valuePresent { return nil, errors.New("you can use either mode and value fields or subscriptionSize field") } - gcpPubSubLog.Info("subscriptionSize field is deprecated. Use mode and value fields instead") + logger.Info("subscriptionSize field is deprecated. Use mode and value fields instead") meta.mode = pubsubModeSubscriptionSize subSizeValue, err := strconv.ParseInt(subSize, 10, 64) if err != nil { @@ -140,14 +142,14 @@ func (s *pubsubScaler) IsActive(ctx context.Context) (bool, error) { case pubsubModeSubscriptionSize: size, err := s.getMetrics(ctx, pubSubStackDriverSubscriptionSizeMetricName) if err != nil { - gcpPubSubLog.Error(err, "error getting Active Status") + s.logger.Error(err, "error getting Active Status") return false, err } return size > s.metadata.activationValue, nil case pubsubModeOldestUnackedMessageAge: delay, err := s.getMetrics(ctx, pubSubStackDriverOldestUnackedMessageAgeMetricName) if err != nil { - gcpPubSubLog.Error(err, "error getting Active Status") + s.logger.Error(err, "error getting Active Status") return false, err } return delay > s.metadata.activationValue, nil @@ -161,7 +163,7 @@ func (s *pubsubScaler) Close(context.Context) error { err := s.client.metricsClient.Close() s.client = nil if err != nil { - gcpPubSubLog.Error(err, "error closing StackDriver client") + s.logger.Error(err, "error closing StackDriver client") } } @@ -195,13 +197,13 @@ func (s *pubsubScaler) GetMetrics(ctx context.Context, metricName string, metric case pubsubModeSubscriptionSize: value, err = s.getMetrics(ctx, pubSubStackDriverSubscriptionSizeMetricName) if err != nil { - gcpPubSubLog.Error(err, "error getting subscription size") + s.logger.Error(err, "error getting subscription size") return []external_metrics.ExternalMetricValue{}, err } case pubsubModeOldestUnackedMessageAge: value, err = s.getMetrics(ctx, pubSubStackDriverOldestUnackedMessageAgeMetricName) if err != nil { - gcpPubSubLog.Error(err, "error getting oldest unacked message age") + s.logger.Error(err, "error getting oldest unacked message age") return []external_metrics.ExternalMetricValue{}, err } } diff --git a/pkg/scalers/gcp_pubsub_scaler_test.go b/pkg/scalers/gcp_pubsub_scaler_test.go index 408c258cac6..ac0f2e58486 100644 --- a/pkg/scalers/gcp_pubsub_scaler_test.go +++ b/pkg/scalers/gcp_pubsub_scaler_test.go @@ -3,6 +3,8 @@ package scalers import ( "context" "testing" + + "github.com/go-logr/logr" ) var testPubSubResolvedEnv = map[string]string{ @@ -68,7 +70,7 @@ var gcpSubscriptionNameTests = []gcpPubSubSubscription{ func TestPubSubParseMetadata(t *testing.T) { for _, testData := range testPubSubMetadata { - _, err := parsePubSubMetadata(&ScalerConfig{AuthParams: testData.authParams, TriggerMetadata: testData.metadata, ResolvedEnv: testPubSubResolvedEnv}) + _, err := parsePubSubMetadata(&ScalerConfig{AuthParams: testData.authParams, TriggerMetadata: testData.metadata, ResolvedEnv: testPubSubResolvedEnv}, logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } @@ -80,11 +82,11 @@ func TestPubSubParseMetadata(t *testing.T) { func TestGcpPubSubGetMetricSpecForScaling(t *testing.T) { for _, testData := range gcpPubSubMetricIdentifiers { 
- meta, err := parsePubSubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testPubSubResolvedEnv, ScalerIndex: testData.scalerIndex}) + meta, err := parsePubSubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testPubSubResolvedEnv, ScalerIndex: testData.scalerIndex}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } - mockGcpPubSubScaler := pubsubScaler{nil, "", meta} + mockGcpPubSubScaler := pubsubScaler{nil, "", meta, logr.Logger{}} metricSpec := mockGcpPubSubScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name @@ -96,11 +98,11 @@ func TestGcpPubSubGetMetricSpecForScaling(t *testing.T) { func TestGcpPubSubSubscriptionName(t *testing.T) { for _, testData := range gcpSubscriptionNameTests { - meta, err := parsePubSubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testPubSubResolvedEnv, ScalerIndex: testData.scalerIndex}) + meta, err := parsePubSubMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testPubSubResolvedEnv, ScalerIndex: testData.scalerIndex}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } - mockGcpPubSubScaler := pubsubScaler{nil, "", meta} + mockGcpPubSubScaler := pubsubScaler{nil, "", meta, logr.Logger{}} subscriptionID, projectID := getSubscriptionData(&mockGcpPubSubScaler) if subscriptionID != testData.name || projectID != testData.projectID { diff --git a/pkg/scalers/gcp_stackdriver_scaler.go b/pkg/scalers/gcp_stackdriver_scaler.go index c4aec0e8b1f..0be57fc9295 100644 --- a/pkg/scalers/gcp_stackdriver_scaler.go +++ b/pkg/scalers/gcp_stackdriver_scaler.go @@ -5,11 +5,11 @@ import ( "fmt" "strconv" + "github.com/go-logr/logr" monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -22,6 +22,7 @@ type stackdriverScaler struct { client *StackDriverClient metricType v2beta2.MetricTargetType metadata *stackdriverMetadata + logger logr.Logger } type stackdriverMetadata struct { @@ -35,8 +36,6 @@ type stackdriverMetadata struct { aggregation *monitoringpb.Aggregation } -var gcpStackdriverLog = logf.Log.WithName("gcp_stackdriver_scaler") - // NewStackdriverScaler creates a new stackdriverScaler func NewStackdriverScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -44,14 +43,16 @@ func NewStackdriverScaler(ctx context.Context, config *ScalerConfig) (Scaler, er return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - meta, err := parseStackdriverMetadata(config) + logger := InitializeLogger(config, "gcp_stackdriver_scaler") + + meta, err := parseStackdriverMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing Stackdriver metadata: %s", err) } - client, err := initializeStackdriverClient(ctx, meta.gcpAuthorization) + client, err := initializeStackdriverClient(ctx, meta.gcpAuthorization, logger) if err != nil { - gcpStackdriverLog.Error(err, "Failed to create stack driver client") + logger.Error(err, "Failed to create stack driver client") return nil, err } @@ -59,10 +60,11 @@ func NewStackdriverScaler(ctx context.Context, config *ScalerConfig) (Scaler, er metricType: metricType, metadata: meta, client: 
client, + logger: logger, }, nil } -func parseStackdriverMetadata(config *ScalerConfig) (*stackdriverMetadata, error) { +func parseStackdriverMetadata(config *ScalerConfig, logger logr.Logger) (*stackdriverMetadata, error) { meta := stackdriverMetadata{} meta.targetValue = defaultStackdriverTargetValue @@ -92,7 +94,7 @@ func parseStackdriverMetadata(config *ScalerConfig) (*stackdriverMetadata, error if val, ok := config.TriggerMetadata["targetValue"]; ok { targetValue, err := strconv.ParseInt(val, 10, 64) if err != nil { - gcpStackdriverLog.Error(err, "Error parsing targetValue") + logger.Error(err, "Error parsing targetValue") return nil, fmt.Errorf("error parsing targetValue: %s", err.Error()) } @@ -114,7 +116,7 @@ func parseStackdriverMetadata(config *ScalerConfig) (*stackdriverMetadata, error } meta.gcpAuthorization = auth - meta.aggregation, err = parseAggregation(config) + meta.aggregation, err = parseAggregation(config, logger) if err != nil { return nil, err } @@ -122,7 +124,7 @@ func parseStackdriverMetadata(config *ScalerConfig) (*stackdriverMetadata, error return &meta, nil } -func parseAggregation(config *ScalerConfig) (*monitoringpb.Aggregation, error) { +func parseAggregation(config *ScalerConfig, logger logr.Logger) (*monitoringpb.Aggregation, error) { if period, ok := config.TriggerMetadata["alignmentPeriodSeconds"]; ok { if period == "" { return nil, nil @@ -130,11 +132,11 @@ func parseAggregation(config *ScalerConfig) (*monitoringpb.Aggregation, error) { val, err := strconv.ParseInt(period, 10, 64) if val < 60 { - gcpStackdriverLog.Error(err, "Error parsing alignmentPeriodSeconds - must be at least 60") + logger.Error(err, "Error parsing alignmentPeriodSeconds - must be at least 60") return nil, fmt.Errorf("error parsing alignmentPeriodSeconds - must be at least 60") } if err != nil { - gcpStackdriverLog.Error(err, "Error parsing alignmentPeriodSeconds") + logger.Error(err, "Error parsing alignmentPeriodSeconds") return nil, fmt.Errorf("error parsing alignmentPeriodSeconds: %s", err.Error()) } @@ -144,7 +146,7 @@ func parseAggregation(config *ScalerConfig) (*monitoringpb.Aggregation, error) { return nil, nil } -func initializeStackdriverClient(ctx context.Context, gcpAuthorization *gcpAuthorizationMetadata) (*StackDriverClient, error) { +func initializeStackdriverClient(ctx context.Context, gcpAuthorization *gcpAuthorizationMetadata, logger logr.Logger) (*StackDriverClient, error) { var client *StackDriverClient var err error if gcpAuthorization.podIdentityProviderEnabled { @@ -154,7 +156,7 @@ func initializeStackdriverClient(ctx context.Context, gcpAuthorization *gcpAutho } if err != nil { - gcpStackdriverLog.Error(err, "Failed to create stack driver client") + logger.Error(err, "Failed to create stack driver client") return nil, err } return client, nil @@ -163,7 +165,7 @@ func initializeStackdriverClient(ctx context.Context, gcpAuthorization *gcpAutho func (s *stackdriverScaler) IsActive(ctx context.Context) (bool, error) { value, err := s.getMetrics(ctx) if err != nil { - gcpStackdriverLog.Error(err, "error getting metric value") + s.logger.Error(err, "error getting metric value") return false, err } return value > s.metadata.activationTargetValue, nil @@ -174,7 +176,7 @@ func (s *stackdriverScaler) Close(context.Context) error { err := s.client.metricsClient.Close() s.client = nil if err != nil { - gcpStackdriverLog.Error(err, "error closing StackDriver client") + s.logger.Error(err, "error closing StackDriver client") } } @@ -203,7 +205,7 @@ func (s 
*stackdriverScaler) GetMetricSpecForScaling(context.Context) []v2beta2.M func (s *stackdriverScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { value, err := s.getMetrics(ctx) if err != nil { - gcpStackdriverLog.Error(err, "error getting metric value") + s.logger.Error(err, "error getting metric value") return []external_metrics.ExternalMetricValue{}, err } @@ -216,7 +218,7 @@ func (s *stackdriverScaler) GetMetrics(ctx context.Context, metricName string, m func (s *stackdriverScaler) getMetrics(ctx context.Context) (int64, error) { val, err := s.client.GetMetrics(ctx, s.metadata.filter, s.metadata.projectID, s.metadata.aggregation) if err == nil { - gcpStackdriverLog.V(1).Info( + s.logger.V(1).Info( fmt.Sprintf("Getting metrics for project %s, filter %s and aggregation %v. Result: %d", s.metadata.projectID, s.metadata.filter, diff --git a/pkg/scalers/gcp_stackdriver_scaler_test.go b/pkg/scalers/gcp_stackdriver_scaler_test.go index cc90f76a1e5..995e6fb70d3 100644 --- a/pkg/scalers/gcp_stackdriver_scaler_test.go +++ b/pkg/scalers/gcp_stackdriver_scaler_test.go @@ -3,6 +3,8 @@ package scalers import ( "context" "testing" + + "github.com/go-logr/logr" ) var testStackdriverResolvedEnv = map[string]string{ @@ -60,7 +62,7 @@ var gcpStackdriverMetricIdentifiers = []gcpStackdriverMetricIdentifier{ func TestStackdriverParseMetadata(t *testing.T) { for _, testData := range testStackdriverMetadata { - _, err := parseStackdriverMetadata(&ScalerConfig{AuthParams: testData.authParams, TriggerMetadata: testData.metadata, ResolvedEnv: testStackdriverResolvedEnv}) + _, err := parseStackdriverMetadata(&ScalerConfig{AuthParams: testData.authParams, TriggerMetadata: testData.metadata, ResolvedEnv: testStackdriverResolvedEnv}, logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } @@ -72,11 +74,11 @@ func TestStackdriverParseMetadata(t *testing.T) { func TestGcpStackdriverGetMetricSpecForScaling(t *testing.T) { for _, testData := range gcpStackdriverMetricIdentifiers { - meta, err := parseStackdriverMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testStackdriverResolvedEnv, ScalerIndex: testData.scalerIndex}) + meta, err := parseStackdriverMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testStackdriverResolvedEnv, ScalerIndex: testData.scalerIndex}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } - mockGcpStackdriverScaler := stackdriverScaler{nil, "", meta} + mockGcpStackdriverScaler := stackdriverScaler{nil, "", meta, logr.Logger{}} metricSpec := mockGcpStackdriverScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/gcp_storage_scaler.go b/pkg/scalers/gcp_storage_scaler.go index 6c9a99f276c..2f927a5387c 100644 --- a/pkg/scalers/gcp_storage_scaler.go +++ b/pkg/scalers/gcp_storage_scaler.go @@ -7,12 +7,12 @@ import ( "strings" "cloud.google.com/go/storage" + "github.com/go-logr/logr" "google.golang.org/api/iterator" option "google.golang.org/api/option" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -29,6 +29,7 @@ type gcsScaler struct { bucket *storage.BucketHandle metricType v2beta2.MetricTargetType metadata *gcsMetadata + logger 
logr.Logger } type gcsMetadata struct { @@ -40,8 +41,6 @@ type gcsMetadata struct { activationTargetObjectCount int64 } -var gcsLog = logf.Log.WithName("gcp_storage_scaler") - // NewGcsScaler creates a new gcsScaler func NewGcsScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -49,7 +48,9 @@ func NewGcsScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - meta, err := parseGcsMetadata(config) + logger := InitializeLogger(config, "gcp_storage_scaler") + + meta, err := parseGcsMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing GCP storage metadata: %s", err) } @@ -78,37 +79,38 @@ func NewGcsScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("failed to create a handle to bucket %s", meta.bucketName) } - gcsLog.Info(fmt.Sprintf("Metadata %v", meta)) + logger.Info(fmt.Sprintf("Metadata %v", meta)) return &gcsScaler{ client: client, bucket: bucket, metricType: metricType, metadata: meta, + logger: logger, }, nil } -func parseGcsMetadata(config *ScalerConfig) (*gcsMetadata, error) { +func parseGcsMetadata(config *ScalerConfig, logger logr.Logger) (*gcsMetadata, error) { meta := gcsMetadata{} meta.targetObjectCount = defaultTargetObjectCount meta.maxBucketItemsToScan = defaultMaxBucketItemsToScan if val, ok := config.TriggerMetadata["bucketName"]; ok { if val == "" { - gcsLog.Error(nil, "no bucket name given") + logger.Error(nil, "no bucket name given") return nil, fmt.Errorf("no bucket name given") } meta.bucketName = val } else { - gcsLog.Error(nil, "no bucket name given") + logger.Error(nil, "no bucket name given") return nil, fmt.Errorf("no bucket name given") } if val, ok := config.TriggerMetadata["targetObjectCount"]; ok { targetObjectCount, err := strconv.ParseInt(val, 10, 64) if err != nil { - gcsLog.Error(err, "Error parsing targetObjectCount") + logger.Error(err, "Error parsing targetObjectCount") return nil, fmt.Errorf("error parsing targetObjectCount: %s", err.Error()) } @@ -127,7 +129,7 @@ func parseGcsMetadata(config *ScalerConfig) (*gcsMetadata, error) { if val, ok := config.TriggerMetadata["maxBucketItemsToScan"]; ok { maxBucketItemsToScan, err := strconv.ParseInt(val, 10, 64) if err != nil { - gcsLog.Error(err, "Error parsing maxBucketItemsToScan") + logger.Error(err, "Error parsing maxBucketItemsToScan") return nil, fmt.Errorf("error parsing maxBucketItemsToScan: %s", err.Error()) } @@ -192,7 +194,7 @@ func (s *gcsScaler) getItemCount(ctx context.Context, maxCount int64) (int64, er query := &storage.Query{Prefix: ""} err := query.SetAttrSelection([]string{"Name"}) if err != nil { - gcsLog.Error(err, "failed to set attribute selection") + s.logger.Error(err, "failed to set attribute selection") return 0, err } @@ -206,15 +208,15 @@ func (s *gcsScaler) getItemCount(ctx context.Context, maxCount int64) (int64, er } if err != nil { if strings.Contains(err.Error(), "bucket doesn't exist") { - gcsLog.Info("Bucket " + s.metadata.bucketName + " doesn't exist") + s.logger.Info("Bucket " + s.metadata.bucketName + " doesn't exist") return 0, nil } - gcsLog.Error(err, "failed to enumerate items in bucket "+s.metadata.bucketName) + s.logger.Error(err, "failed to enumerate items in bucket "+s.metadata.bucketName) return count, err } count++ } - gcsLog.V(1).Info(fmt.Sprintf("Counted %d items with a limit of %d", count, maxCount)) + s.logger.V(1).Info(fmt.Sprintf("Counted %d items with a limit of %d", count, maxCount)) return count, nil } 
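Every scaler in this patch switches from a package-level "logf.Log.WithName(...)" variable to a per-instance logger obtained from InitializeLogger(config, "<scaler_name>") and stored on the scaler struct. The helper itself is defined elsewhere in this patch; the following is only a minimal sketch of the idea, assuming just the two ScalerConfig fields this diff introduces (ScalableObjectName, ScalableObjectNamespace) and hypothetical key names — the actual helper may attach different or additional key/value pairs:

	package scalers

	import (
		"github.com/go-logr/logr"
		logf "sigs.k8s.io/controller-runtime/pkg/log"
	)

	// InitializeLogger returns a logger scoped to a single trigger of a
	// single ScaledObject/ScaledJob, so every line a scaler logs can be
	// traced back to the resource that owns it, not just the scaler type.
	func InitializeLogger(config *ScalerConfig, scalerName string) logr.Logger {
		return logf.Log.WithName(scalerName).WithValues(
			// Key names are assumptions for illustration; only the two
			// ScalableObject* fields are confirmed by this diff.
			"scaledObject.Namespace", config.ScalableObjectNamespace,
			"scaledObject.Name", config.ScalableObjectName,
		)
	}

Storing the result in the new "logger logr.Logger" struct field is what makes the s.logger.Error(...) and s.logger.V(1).Info(...) calls throughout the hunks below emit the owning ScaledObject's/ScaledJob's name and namespace, which a shared package-level logger could not do.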
diff --git a/pkg/scalers/gcp_storage_scaler_test.go b/pkg/scalers/gcp_storage_scaler_test.go index 835f429dbc0..9227eb94fb8 100644 --- a/pkg/scalers/gcp_storage_scaler_test.go +++ b/pkg/scalers/gcp_storage_scaler_test.go @@ -3,6 +3,8 @@ package scalers import ( "context" "testing" + + "github.com/go-logr/logr" ) var testGcsResolvedEnv = map[string]string{ @@ -50,7 +52,7 @@ var gcpGcsMetricIdentifiers = []gcpGcsMetricIdentifier{ func TestGcsParseMetadata(t *testing.T) { for _, testData := range testGcsMetadata { - _, err := parseGcsMetadata(&ScalerConfig{AuthParams: testData.authParams, TriggerMetadata: testData.metadata, ResolvedEnv: testGcsResolvedEnv}) + _, err := parseGcsMetadata(&ScalerConfig{AuthParams: testData.authParams, TriggerMetadata: testData.metadata, ResolvedEnv: testGcsResolvedEnv}, logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } @@ -62,11 +64,11 @@ func TestGcsParseMetadata(t *testing.T) { func TestGcsGetMetricSpecForScaling(t *testing.T) { for _, testData := range gcpGcsMetricIdentifiers { - meta, err := parseGcsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testGcsResolvedEnv, ScalerIndex: testData.scalerIndex}) + meta, err := parseGcsMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, ResolvedEnv: testGcsResolvedEnv, ScalerIndex: testData.scalerIndex}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } - mockGcsScaler := gcsScaler{nil, nil, "", meta} + mockGcsScaler := gcsScaler{nil, nil, "", meta, logr.Logger{}} metricSpec := mockGcsScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/graphite_scaler.go b/pkg/scalers/graphite_scaler.go index 9e2bf2763f1..a4b8e3f5c5d 100644 --- a/pkg/scalers/graphite_scaler.go +++ b/pkg/scalers/graphite_scaler.go @@ -9,10 +9,10 @@ import ( url_pkg "net/url" "strconv" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -32,6 +32,7 @@ type graphiteScaler struct { metricType v2beta2.MetricTargetType metadata *graphiteMetadata httpClient *http.Client + logger logr.Logger } type graphiteMetadata struct { @@ -55,8 +56,6 @@ type grapQueryResult []struct { Datapoints [][]*float64 `json:"datapoints,omitempty"` } -var graphiteLog = logf.Log.WithName("graphite_scaler") - // NewGraphiteScaler creates a new graphiteScaler func NewGraphiteScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -75,6 +74,7 @@ func NewGraphiteScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, httpClient: httpClient, + logger: InitializeLogger(config, "graphite_scaler"), }, nil } @@ -152,7 +152,7 @@ func parseGraphiteMetadata(config *ScalerConfig) (*graphiteMetadata, error) { func (s *graphiteScaler) IsActive(ctx context.Context) (bool, error) { val, err := s.executeGrapQuery(ctx) if err != nil { - graphiteLog.Error(err, "error executing graphite query") + s.logger.Error(err, "error executing graphite query") return false, err } @@ -227,7 +227,7 @@ func (s *graphiteScaler) executeGrapQuery(ctx context.Context) (float64, error) func (s *graphiteScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { val, err 
:= s.executeGrapQuery(ctx) if err != nil { - graphiteLog.Error(err, "error executing graphite query") + s.logger.Error(err, "error executing graphite query") return []external_metrics.ExternalMetricValue{}, err } diff --git a/pkg/scalers/huawei_cloudeye_scaler.go b/pkg/scalers/huawei_cloudeye_scaler.go index 0edd011676b..ed178ac0e70 100644 --- a/pkg/scalers/huawei_cloudeye_scaler.go +++ b/pkg/scalers/huawei_cloudeye_scaler.go @@ -10,10 +10,10 @@ import ( "github.com/Huawei/gophercloud/auth/aksk" "github.com/Huawei/gophercloud/openstack" "github.com/Huawei/gophercloud/openstack/ces/v1/metricdata" + "github.com/go-logr/logr" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -29,6 +29,7 @@ const ( type huaweiCloudeyeScaler struct { metricType v2beta2.MetricTargetType metadata *huaweiCloudeyeMetadata + logger logr.Logger } type huaweiCloudeyeMetadata struct { @@ -70,8 +71,6 @@ type huaweiAuthorizationMetadata struct { SecretKey string // Secret key } -var cloudeyeLog = logf.Log.WithName("huawei_cloudeye_scaler") - // NewHuaweiCloudeyeScaler creates a new huaweiCloudeyeScaler func NewHuaweiCloudeyeScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -79,7 +78,9 @@ func NewHuaweiCloudeyeScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - meta, err := parseHuaweiCloudeyeMetadata(config) + logger := InitializeLogger(config, "huawei_cloudeye_scaler") + + meta, err := parseHuaweiCloudeyeMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing Cloudeye metadata: %s", err) } @@ -87,10 +88,11 @@ func NewHuaweiCloudeyeScaler(config *ScalerConfig) (Scaler, error) { return &huaweiCloudeyeScaler{ metricType: metricType, metadata: meta, + logger: logger, }, nil } -func parseHuaweiCloudeyeMetadata(config *ScalerConfig) (*huaweiCloudeyeMetadata, error) { +func parseHuaweiCloudeyeMetadata(config *ScalerConfig, logger logr.Logger) (*huaweiCloudeyeMetadata, error) { meta := huaweiCloudeyeMetadata{} meta.metricCollectionTime = defaultCloudeyeMetricCollectionTime @@ -124,7 +126,7 @@ func parseHuaweiCloudeyeMetadata(config *ScalerConfig) (*huaweiCloudeyeMetadata, if val, ok := config.TriggerMetadata["targetMetricValue"]; ok && val != "" { targetMetricValue, err := strconv.ParseFloat(val, 64) if err != nil { - cloudeyeLog.Error(err, "Error parsing targetMetricValue metadata") + logger.Error(err, "Error parsing targetMetricValue metadata") } else { meta.targetMetricValue = targetMetricValue } @@ -136,7 +138,7 @@ func parseHuaweiCloudeyeMetadata(config *ScalerConfig) (*huaweiCloudeyeMetadata, if val, ok := config.TriggerMetadata["activationTargetMetricValue"]; ok && val != "" { activationTargetMetricValue, err := strconv.ParseFloat(val, 64) if err != nil { - cloudeyeLog.Error(err, "Error parsing activationTargetMetricValue metadata") + logger.Error(err, "Error parsing activationTargetMetricValue metadata") } meta.activationTargetMetricValue = activationTargetMetricValue } @@ -144,9 +146,9 @@ func parseHuaweiCloudeyeMetadata(config *ScalerConfig) (*huaweiCloudeyeMetadata, if val, ok := config.TriggerMetadata["minMetricValue"]; ok && val != "" { minMetricValue, err := strconv.ParseFloat(val, 64) if err != nil { - cloudeyeLog.Error(err, "Error parsing minMetricValue metadata") + logger.Error(err, "Error parsing minMetricValue 
metadata") } else { - cloudeyeLog.Error(err, "minMetricValue is deprecated and will be removed in next versions, please use activationTargetMetricValue instead") + logger.Error(err, "minMetricValue is deprecated and will be removed in next versions, please use activationTargetMetricValue instead") meta.activationTargetMetricValue = minMetricValue } } else { @@ -156,7 +158,7 @@ func parseHuaweiCloudeyeMetadata(config *ScalerConfig) (*huaweiCloudeyeMetadata, if val, ok := config.TriggerMetadata["metricCollectionTime"]; ok && val != "" { metricCollectionTime, err := strconv.Atoi(val) if err != nil { - cloudeyeLog.Error(err, "Error parsing metricCollectionTime metadata") + logger.Error(err, "Error parsing metricCollectionTime metadata") } else { meta.metricCollectionTime = int64(metricCollectionTime) } @@ -169,7 +171,7 @@ func parseHuaweiCloudeyeMetadata(config *ScalerConfig) (*huaweiCloudeyeMetadata, if val, ok := config.TriggerMetadata["metricPeriod"]; ok && val != "" { _, err := strconv.Atoi(val) if err != nil { - cloudeyeLog.Error(err, "Error parsing metricPeriod metadata") + logger.Error(err, "Error parsing metricPeriod metadata") } else { meta.metricPeriod = val } @@ -239,11 +241,11 @@ func gethuaweiAuthorization(authParams map[string]string) (huaweiAuthorizationMe return meta, nil } -func (h *huaweiCloudeyeScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { - metricValue, err := h.GetCloudeyeMetrics() +func (s *huaweiCloudeyeScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { + metricValue, err := s.GetCloudeyeMetrics() if err != nil { - cloudeyeLog.Error(err, "Error getting metric value") + s.logger.Error(err, "Error getting metric value") return []external_metrics.ExternalMetricValue{}, err } @@ -251,55 +253,55 @@ func (h *huaweiCloudeyeScaler) GetMetrics(ctx context.Context, metricName string return append([]external_metrics.ExternalMetricValue{}, metric), nil } -func (h *huaweiCloudeyeScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec { +func (s *huaweiCloudeyeScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec { externalMetric := &v2beta2.ExternalMetricSource{ Metric: v2beta2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(h.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("huawei-cloudeye-%s", h.metadata.metricsName))), + Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, kedautil.NormalizeString(fmt.Sprintf("huawei-cloudeye-%s", s.metadata.metricsName))), }, - Target: GetMetricTargetMili(h.metricType, h.metadata.targetMetricValue), + Target: GetMetricTargetMili(s.metricType, s.metadata.targetMetricValue), } metricSpec := v2beta2.MetricSpec{External: externalMetric, Type: externalMetricType} return []v2beta2.MetricSpec{metricSpec} } -func (h *huaweiCloudeyeScaler) IsActive(ctx context.Context) (bool, error) { - val, err := h.GetCloudeyeMetrics() +func (s *huaweiCloudeyeScaler) IsActive(ctx context.Context) (bool, error) { + val, err := s.GetCloudeyeMetrics() if err != nil { return false, err } - return val > h.metadata.activationTargetMetricValue, nil + return val > s.metadata.activationTargetMetricValue, nil } -func (h *huaweiCloudeyeScaler) Close(context.Context) error { +func (s *huaweiCloudeyeScaler) Close(context.Context) error { return nil } -func (h *huaweiCloudeyeScaler) GetCloudeyeMetrics() (float64, error) { +func (s 
*huaweiCloudeyeScaler) GetCloudeyeMetrics() (float64, error) { options := aksk.AKSKOptions{ - IdentityEndpoint: h.metadata.huaweiAuthorization.IdentityEndpoint, - ProjectID: h.metadata.huaweiAuthorization.ProjectID, - AccessKey: h.metadata.huaweiAuthorization.AccessKey, - SecretKey: h.metadata.huaweiAuthorization.SecretKey, - Region: h.metadata.huaweiAuthorization.Region, - Domain: h.metadata.huaweiAuthorization.Domain, - DomainID: h.metadata.huaweiAuthorization.DomainID, - Cloud: h.metadata.huaweiAuthorization.Cloud, + IdentityEndpoint: s.metadata.huaweiAuthorization.IdentityEndpoint, + ProjectID: s.metadata.huaweiAuthorization.ProjectID, + AccessKey: s.metadata.huaweiAuthorization.AccessKey, + SecretKey: s.metadata.huaweiAuthorization.SecretKey, + Region: s.metadata.huaweiAuthorization.Region, + Domain: s.metadata.huaweiAuthorization.Domain, + DomainID: s.metadata.huaweiAuthorization.DomainID, + Cloud: s.metadata.huaweiAuthorization.Cloud, } provider, err := openstack.AuthenticatedClient(options) if err != nil { - cloudeyeLog.Error(err, "Failed to get the provider") + s.logger.Error(err, "Failed to get the provider") return -1, err } sc, err := openstack.NewCESV1(provider, gophercloud.EndpointOpts{}) if err != nil { - cloudeyeLog.Error(err, "get ces client failed") + s.logger.Error(err, "get ces client failed") if ue, ok := err.(*gophercloud.UnifiedError); ok { - cloudeyeLog.Info("ErrCode:", ue.ErrorCode()) - cloudeyeLog.Info("Message:", ue.Message()) + s.logger.Info("ErrCode:", ue.ErrorCode()) + s.logger.Info("Message:", ue.Message()) } return -1, err } @@ -307,38 +309,38 @@ func (h *huaweiCloudeyeScaler) GetCloudeyeMetrics() (float64, error) { opts := metricdata.BatchQueryOpts{ Metrics: []metricdata.Metric{ { - Namespace: h.metadata.namespace, + Namespace: s.metadata.namespace, Dimensions: []map[string]string{ { - "name": h.metadata.dimensionName, - "value": h.metadata.dimensionValue, + "name": s.metadata.dimensionName, + "value": s.metadata.dimensionValue, }, }, - MetricName: h.metadata.metricsName, + MetricName: s.metadata.metricsName, }, }, - From: time.Now().Truncate(time.Minute).Add(time.Second*-1*time.Duration(h.metadata.metricCollectionTime)).UnixNano() / 1e6, + From: time.Now().Truncate(time.Minute).Add(time.Second*-1*time.Duration(s.metadata.metricCollectionTime)).UnixNano() / 1e6, To: time.Now().Truncate(time.Minute).UnixNano() / 1e6, - Period: h.metadata.metricPeriod, - Filter: h.metadata.metricFilter, + Period: s.metadata.metricPeriod, + Filter: s.metadata.metricFilter, } metricdatas, err := metricdata.BatchQuery(sc, opts).ExtractMetricDatas() if err != nil { - cloudeyeLog.Error(err, "query metrics failed") + s.logger.Error(err, "query metrics failed") if ue, ok := err.(*gophercloud.UnifiedError); ok { - cloudeyeLog.Info("ErrCode:", ue.ErrorCode()) - cloudeyeLog.Info("Message:", ue.Message()) + s.logger.Info("ErrCode:", ue.ErrorCode()) + s.logger.Info("Message:", ue.Message()) } return -1, err } - cloudeyeLog.V(1).Info("Received Metric Data", "data", metricdatas) + s.logger.V(1).Info("Received Metric Data", "data", metricdatas) var metricValue float64 if metricdatas[0].Datapoints != nil && len(metricdatas[0].Datapoints) > 0 { - v, ok := metricdatas[0].Datapoints[0][h.metadata.metricFilter].(float64) + v, ok := metricdatas[0].Datapoints[0][s.metadata.metricFilter].(float64) if ok { metricValue = v } else { diff --git a/pkg/scalers/huawei_cloudeye_scaler_test.go b/pkg/scalers/huawei_cloudeye_scaler_test.go index 7207a18b708..5203388ff1e 100644 --- 
a/pkg/scalers/huawei_cloudeye_scaler_test.go +++ b/pkg/scalers/huawei_cloudeye_scaler_test.go @@ -3,6 +3,8 @@ package scalers import ( "context" "testing" + + "github.com/go-logr/logr" ) var ( @@ -158,7 +160,7 @@ var huaweiCloudeyeMetricIdentifiers = []huaweiCloudeyeMetricIdentifier{ func TestHuaweiCloudeyeParseMetadata(t *testing.T) { for _, testData := range testHuaweiCloudeyeMetadata { - _, err := parseHuaweiCloudeyeMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) + _, err := parseHuaweiCloudeyeMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}, logr.Logger{}) if err != nil && !testData.isError { t.Errorf("%s: Expected success but got error %s", testData.comment, err) } @@ -170,11 +172,11 @@ func TestHuaweiCloudeyeParseMetadata(t *testing.T) { func TestHuaweiCloudeyeGetMetricSpecForScaling(t *testing.T) { for _, testData := range huaweiCloudeyeMetricIdentifiers { - meta, err := parseHuaweiCloudeyeMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}) + meta, err := parseHuaweiCloudeyeMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } - mockHuaweiCloudeyeScaler := huaweiCloudeyeScaler{"", meta} + mockHuaweiCloudeyeScaler := huaweiCloudeyeScaler{"", meta, logr.Logger{}} metricSpec := mockHuaweiCloudeyeScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/ibmmq_scaler.go b/pkg/scalers/ibmmq_scaler.go index f3d4e5186b1..2321d1add62 100644 --- a/pkg/scalers/ibmmq_scaler.go +++ b/pkg/scalers/ibmmq_scaler.go @@ -12,6 +12,7 @@ import ( "strconv" "time" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" @@ -30,6 +31,7 @@ type IBMMQScaler struct { metricType v2beta2.MetricTargetType metadata *IBMMQMetadata defaultHTTPTimeout time.Duration + logger logr.Logger } // IBMMQMetadata Metadata used by KEDA to query IBM MQ queue depth and scale @@ -76,6 +78,7 @@ func NewIBMMQScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, defaultHTTPTimeout: config.GlobalHTTPTimeout, + logger: InitializeLogger(config, "ibm_mq_scaler"), }, nil } diff --git a/pkg/scalers/influxdb_scaler.go b/pkg/scalers/influxdb_scaler.go index 79f6b895e7e..2e349799431 100644 --- a/pkg/scalers/influxdb_scaler.go +++ b/pkg/scalers/influxdb_scaler.go @@ -6,12 +6,12 @@ import ( "fmt" "strconv" + "github.com/go-logr/logr" influxdb2 "github.com/influxdata/influxdb-client-go/v2" api "github.com/influxdata/influxdb-client-go/v2/api" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -20,6 +20,7 @@ type influxDBScaler struct { client influxdb2.Client metricType v2beta2.MetricTargetType metadata *influxDBMetadata + logger logr.Logger } type influxDBMetadata struct { @@ -33,8 +34,6 @@ type influxDBMetadata struct { scalerIndex int } -var influxDBLog = logf.Log.WithName("influxdb_scaler") - // NewInfluxDBScaler creates a new influx db scaler func NewInfluxDBScaler(config *ScalerConfig) (Scaler, error) { 
metricType, err := GetMetricTargetType(config) @@ -42,12 +41,14 @@ func NewInfluxDBScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } + logger := InitializeLogger(config, "influxdb_scaler") + meta, err := parseInfluxDBMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing influxdb metadata: %s", err) } - influxDBLog.Info("starting up influxdb client") + logger.Info("starting up influxdb client") client := influxdb2.NewClientWithOptions( meta.serverURL, meta.authToken, @@ -57,6 +58,7 @@ func NewInfluxDBScaler(config *ScalerConfig) (Scaler, error) { client: client, metricType: metricType, metadata: meta, + logger: logger, }, nil } diff --git a/pkg/scalers/influxdb_scaler_test.go b/pkg/scalers/influxdb_scaler_test.go index b10ff673a78..676fe0cb814 100644 --- a/pkg/scalers/influxdb_scaler_test.go +++ b/pkg/scalers/influxdb_scaler_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr" influxdb2 "github.com/influxdata/influxdb-client-go/v2" ) @@ -72,7 +73,7 @@ func TestInfluxDBGetMetricSpecForScaling(t *testing.T) { if err != nil { t.Fatal("Could not parse metadata:", err) } - mockInfluxDBScaler := influxDBScaler{influxdb2.NewClient("https://influxdata.com", "myToken"), "", meta} + mockInfluxDBScaler := influxDBScaler{influxdb2.NewClient("https://influxdata.com", "myToken"), "", meta, logr.Logger{}} metricSpec := mockInfluxDBScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/kafka_scaler.go b/pkg/scalers/kafka_scaler.go index fbdac7ed653..a4f3e3adb44 100644 --- a/pkg/scalers/kafka_scaler.go +++ b/pkg/scalers/kafka_scaler.go @@ -9,10 +9,10 @@ import ( "sync" "github.com/Shopify/sarama" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -22,6 +22,7 @@ type kafkaScaler struct { metadata kafkaMetadata client sarama.Client admin sarama.ClusterAdmin + logger logr.Logger } type kafkaMetadata struct { @@ -77,8 +78,6 @@ const ( invalidOffset = -1 ) -var kafkaLog = logf.Log.WithName("kafka_scaler") - // NewKafkaScaler creates a new kafkaScaler func NewKafkaScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -86,7 +85,9 @@ func NewKafkaScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - kafkaMetadata, err := parseKafkaMetadata(config) + logger := InitializeLogger(config, "kafka_scaler") + + kafkaMetadata, err := parseKafkaMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing kafka metadata: %s", err) } @@ -101,6 +102,7 @@ func NewKafkaScaler(config *ScalerConfig) (Scaler, error) { admin: admin, metricType: metricType, metadata: kafkaMetadata, + logger: logger, }, nil } @@ -156,7 +158,7 @@ func parseKafkaAuthParams(config *ScalerConfig, meta *kafkaMetadata) error { return nil } -func parseKafkaMetadata(config *ScalerConfig) (kafkaMetadata, error) { +func parseKafkaMetadata(config *ScalerConfig, logger logr.Logger) (kafkaMetadata, error) { meta := kafkaMetadata{} switch { case config.TriggerMetadata["bootstrapServersFromEnv"] != "": @@ -183,7 +185,7 @@ func parseKafkaMetadata(config *ScalerConfig) (kafkaMetadata, error) { meta.topic = config.TriggerMetadata["topic"] default: meta.topic = "" - 
kafkaLog.V(1).Info(fmt.Sprintf("consumer group %q has no topic specified, "+ + logger.V(1).Info(fmt.Sprintf("consumer group %q has no topic specified, "+ "will use all topics subscribed by the consumer group for scaling", meta.group)) } @@ -263,7 +265,7 @@ func (s *kafkaScaler) IsActive(ctx context.Context) (bool, error) { if err != nil && lag == invalidOffset { return true, nil } - kafkaLog.V(1).Info(fmt.Sprintf("Group %s has a lag of %d for topic %s and partition %d\n", s.metadata.group, lag, topic, partitionID)) + s.logger.V(1).Info(fmt.Sprintf("Group %s has a lag of %d for topic %s and partition %d\n", s.metadata.group, lag, topic, partitionID)) // Return as soon as a lag was detected for any partitionID if lag > 0 { @@ -372,7 +374,7 @@ func (s *kafkaScaler) getLagForPartition(topic string, partitionID int32, offset block := offsets.GetBlock(topic, partitionID) if block == nil { errMsg := fmt.Errorf("error finding offset block for topic %s and partition %d", topic, partitionID) - kafkaLog.Error(errMsg, "") + s.logger.Error(errMsg, "") return 0, errMsg } consumerOffset := block.Offset @@ -384,7 +386,7 @@ func (s *kafkaScaler) getLagForPartition(topic string, partitionID int32, offset msg := fmt.Sprintf( "invalid offset found for topic %s in group %s and partition %d, probably no offset is committed yet. Returning with lag of %d", topic, s.metadata.group, partitionID, retVal) - kafkaLog.V(0).Info(msg) + s.logger.V(0).Info(msg) return retVal, nil } @@ -485,7 +487,7 @@ func (s *kafkaScaler) GetMetrics(ctx context.Context, metricName string, metricS } totalTopicPartitions += (int64)(len(partitionsOffsets)) } - kafkaLog.V(1).Info(fmt.Sprintf("Kafka scaler: Providing metrics based on totalLag %v, topicPartitions %v, threshold %v", totalLag, len(topicPartitions), s.metadata.lagThreshold)) + s.logger.V(1).Info(fmt.Sprintf("Kafka scaler: Providing metrics based on totalLag %v, topicPartitions %v, threshold %v", totalLag, len(topicPartitions), s.metadata.lagThreshold)) if !s.metadata.allowIdleConsumers { // don't scale out beyond the number of topicPartitions diff --git a/pkg/scalers/kafka_scaler_test.go b/pkg/scalers/kafka_scaler_test.go index 3d52e8c525b..079730f2de3 100644 --- a/pkg/scalers/kafka_scaler_test.go +++ b/pkg/scalers/kafka_scaler_test.go @@ -4,6 +4,8 @@ import ( "context" "reflect" "testing" + + "github.com/go-logr/logr" ) type parseKafkaMetadataTestData struct { @@ -131,7 +133,7 @@ var kafkaMetricIdentifiers = []kafkaMetricIdentifier{ func TestGetBrokers(t *testing.T) { for _, testData := range parseKafkaMetadataTestDataset { - meta, err := parseKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: validWithAuthParams}) + meta, err := parseKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: validWithAuthParams}, logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) @@ -155,7 +157,7 @@ func TestGetBrokers(t *testing.T) { t.Errorf("Expected offsetResetPolicy %s but got %s\n", testData.offsetResetPolicy, meta.offsetResetPolicy) } - meta, err = parseKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: validWithoutAuthParams}) + meta, err = parseKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: validWithoutAuthParams}, logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) @@ -186,7 +188,7 @@ func TestGetBrokers(t *testing.T) { func TestKafkaAuthParams(t *testing.T) { for _, testData := range 
parseKafkaAuthParamsTestDataset { - meta, err := parseKafkaMetadata(&ScalerConfig{TriggerMetadata: validKafkaMetadata, AuthParams: testData.authParams}) + meta, err := parseKafkaMetadata(&ScalerConfig{TriggerMetadata: validKafkaMetadata, AuthParams: testData.authParams}, logr.Logger{}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) @@ -216,11 +218,11 @@ func TestKafkaAuthParams(t *testing.T) { func TestKafkaGetMetricSpecForScaling(t *testing.T) { for _, testData := range kafkaMetricIdentifiers { - meta, err := parseKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: validWithAuthParams, ScalerIndex: testData.scalerIndex}) + meta, err := parseKafkaMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: validWithAuthParams, ScalerIndex: testData.scalerIndex}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } - mockKafkaScaler := kafkaScaler{"", meta, nil, nil} + mockKafkaScaler := kafkaScaler{"", meta, nil, nil, logr.Logger{}} metricSpec := mockKafkaScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/kubernetes_workload_scaler.go b/pkg/scalers/kubernetes_workload_scaler.go index e5f266b6234..6c7b3d10764 100644 --- a/pkg/scalers/kubernetes_workload_scaler.go +++ b/pkg/scalers/kubernetes_workload_scaler.go @@ -5,6 +5,7 @@ import ( "fmt" "strconv" + "github.com/go-logr/logr" "k8s.io/api/autoscaling/v2beta2" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/labels" @@ -18,6 +19,7 @@ type kubernetesWorkloadScaler struct { metricType v2beta2.MetricTargetType metadata *kubernetesWorkloadMetadata kubeClient client.Client + logger logr.Logger } const ( @@ -56,13 +58,14 @@ func NewKubernetesWorkloadScaler(kubeClient client.Client, config *ScalerConfig) metricType: metricType, metadata: meta, kubeClient: kubeClient, + logger: InitializeLogger(config, "kubernetes_workload_scaler"), }, nil } func parseWorkloadMetadata(config *ScalerConfig) (*kubernetesWorkloadMetadata, error) { meta := &kubernetesWorkloadMetadata{} var err error - meta.namespace = config.Namespace + meta.namespace = config.ScalableObjectNamespace meta.podSelector, err = labels.Parse(config.TriggerMetadata[podSelectorKey]) if err != nil || meta.podSelector.String() == "" { return nil, fmt.Errorf("invalid pod selector") diff --git a/pkg/scalers/kubernetes_workload_scaler_test.go b/pkg/scalers/kubernetes_workload_scaler_test.go index 53ae145ea69..d7e26e4da05 100644 --- a/pkg/scalers/kubernetes_workload_scaler_test.go +++ b/pkg/scalers/kubernetes_workload_scaler_test.go @@ -36,7 +36,7 @@ var parseWorkloadMetadataTestDataset = []workloadMetadataTestData{ func TestParseWorkloadMetadata(t *testing.T) { for _, testData := range parseWorkloadMetadataTestDataset { - _, err := parseWorkloadMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, Namespace: testData.namespace}) + _, err := parseWorkloadMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, ScalableObjectNamespace: testData.namespace}) if err != nil && !testData.isError { t.Error("Expected success but got error", err) } @@ -69,10 +69,10 @@ func TestWorkloadIsActive(t *testing.T) { s, _ := NewKubernetesWorkloadScaler( fake.NewClientBuilder().WithRuntimeObjects(createPodlist(testData.podCount)).Build(), &ScalerConfig{ - TriggerMetadata: testData.metadata, - AuthParams: map[string]string{}, - GlobalHTTPTimeout: 1000 * time.Millisecond, - Namespace: testData.namespace, + 
TriggerMetadata: testData.metadata, + AuthParams: map[string]string{}, + GlobalHTTPTimeout: 1000 * time.Millisecond, + ScalableObjectNamespace: testData.namespace, }, ) isActive, _ := s.IsActive(context.TODO()) @@ -108,11 +108,11 @@ func TestWorkloadGetMetricSpecForScaling(t *testing.T) { s, _ := NewKubernetesWorkloadScaler( fake.NewClientBuilder().Build(), &ScalerConfig{ - TriggerMetadata: testData.metadata, - AuthParams: map[string]string{}, - GlobalHTTPTimeout: 1000 * time.Millisecond, - Namespace: testData.namespace, - ScalerIndex: testData.scalerIndex, + TriggerMetadata: testData.metadata, + AuthParams: map[string]string{}, + GlobalHTTPTimeout: 1000 * time.Millisecond, + ScalableObjectNamespace: testData.namespace, + ScalerIndex: testData.scalerIndex, }, ) metric := s.GetMetricSpecForScaling(context.Background()) @@ -175,9 +175,9 @@ func TestWorkloadPhase(t *testing.T) { "podSelector": "app=testphases", "value": "1", }, - AuthParams: map[string]string{}, - GlobalHTTPTimeout: 1000 * time.Millisecond, - Namespace: "default", + AuthParams: map[string]string{}, + GlobalHTTPTimeout: 1000 * time.Millisecond, + ScalableObjectNamespace: "default", }, ) if err != nil { diff --git a/pkg/scalers/liiklus_scaler.go b/pkg/scalers/liiklus_scaler.go index 6725141e880..1d924feca7c 100644 --- a/pkg/scalers/liiklus_scaler.go +++ b/pkg/scalers/liiklus_scaler.go @@ -6,6 +6,7 @@ import ( "strconv" "time" + "github.com/go-logr/logr" "github.com/pkg/errors" "google.golang.org/grpc" "google.golang.org/grpc/credentials/insecure" @@ -22,6 +23,7 @@ type liiklusScaler struct { metadata *liiklusMetadata connection *grpc.ClientConn client liiklus_service.LiiklusServiceClient + logger logr.Logger } type liiklusMetadata struct { @@ -68,6 +70,7 @@ func NewLiiklusScaler(config *ScalerConfig) (Scaler, error) { client: c, metricType: metricType, metadata: lm, + logger: InitializeLogger(config, "liiklus_scaler"), } return &scaler, nil } diff --git a/pkg/scalers/liiklus_scaler_test.go b/pkg/scalers/liiklus_scaler_test.go index 522bb70fe9b..14b45691bda 100644 --- a/pkg/scalers/liiklus_scaler_test.go +++ b/pkg/scalers/liiklus_scaler_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr" "github.com/golang/mock/gomock" "github.com/pkg/errors" @@ -173,7 +174,7 @@ func TestLiiklusGetMetricSpecForScaling(t *testing.T) { if err != nil { t.Fatal("Could not parse metadata:", err) } - mockLiiklusScaler := liiklusScaler{"", meta, nil, nil} + mockLiiklusScaler := liiklusScaler{"", meta, nil, nil, logr.Logger{}} metricSpec := mockLiiklusScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/metrics_api_scaler.go b/pkg/scalers/metrics_api_scaler.go index bca088ea605..4076303c6a7 100644 --- a/pkg/scalers/metrics_api_scaler.go +++ b/pkg/scalers/metrics_api_scaler.go @@ -10,12 +10,12 @@ import ( "strconv" "strings" + "github.com/go-logr/logr" "github.com/tidwall/gjson" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/kedacore/keda/v2/pkg/scalers/authentication" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -25,6 +25,7 @@ type metricsAPIScaler struct { metricType v2beta2.MetricTargetType metadata *metricsAPIScalerMetadata client *http.Client + logger logr.Logger } type metricsAPIScalerMetadata struct { @@ -63,8 +64,6 @@ const ( methodValueQuery = "query" ) -var httpLog = 
logf.Log.WithName("metrics_api_scaler") - // NewMetricsAPIScaler creates a new HTTP scaler func NewMetricsAPIScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -92,6 +91,7 @@ func NewMetricsAPIScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, client: httpClient, + logger: InitializeLogger(config, "metrics_api_scaler"), }, nil } @@ -252,7 +252,7 @@ func (s *metricsAPIScaler) Close(context.Context) error { func (s *metricsAPIScaler) IsActive(ctx context.Context) (bool, error) { v, err := s.getMetricValue(ctx) if err != nil { - httpLog.Error(err, fmt.Sprintf("Error when checking metric value: %s", err)) + s.logger.Error(err, fmt.Sprintf("Error when checking metric value: %s", err)) return false, err } diff --git a/pkg/scalers/mongo_scaler.go b/pkg/scalers/mongo_scaler.go index 67ac1732b76..bba3ca9ebbd 100644 --- a/pkg/scalers/mongo_scaler.go +++ b/pkg/scalers/mongo_scaler.go @@ -7,6 +7,7 @@ import ( "strconv" "time" + "github.com/go-logr/logr" "go.mongodb.org/mongo-driver/bson" "go.mongodb.org/mongo-driver/mongo" "go.mongodb.org/mongo-driver/mongo/options" @@ -15,7 +16,6 @@ import ( "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -25,6 +25,7 @@ type mongoDBScaler struct { metricType v2beta2.MetricTargetType metadata *mongoDBMetadata client *mongo.Client + logger logr.Logger } // mongoDBMetadata specify mongoDB scaler params. @@ -74,8 +75,6 @@ const ( mongoDBDefaultTimeOut = 10 * time.Second ) -var mongoDBLog = logf.Log.WithName("mongodb_scaler") - // NewMongoDBScaler creates a new mongoDB scaler func NewMongoDBScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -105,6 +104,7 @@ func NewMongoDBScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) metricType: metricType, metadata: meta, client: client, + logger: InitializeLogger(config, "mongodb_scaler"), }, nil } @@ -205,7 +205,7 @@ func parseMongoDBMetadata(config *ScalerConfig) (*mongoDBMetadata, string, error func (s *mongoDBScaler) IsActive(ctx context.Context) (bool, error) { result, err := s.getQueryResult(ctx) if err != nil { - mongoDBLog.Error(err, fmt.Sprintf("failed to get query result by mongoDB, because of %v", err)) + s.logger.Error(err, fmt.Sprintf("failed to get query result by mongoDB, because of %v", err)) return false, err } return result > s.metadata.activationQueryValue, nil @@ -216,7 +216,7 @@ func (s *mongoDBScaler) Close(ctx context.Context) error { if s.client != nil { err := s.client.Disconnect(ctx) if err != nil { - mongoDBLog.Error(err, fmt.Sprintf("failed to close mongoDB connection, because of %v", err)) + s.logger.Error(err, fmt.Sprintf("failed to close mongoDB connection, because of %v", err)) return err } } @@ -231,13 +231,13 @@ func (s *mongoDBScaler) getQueryResult(ctx context.Context) (int64, error) { filter, err := json2BsonDoc(s.metadata.query) if err != nil { - mongoDBLog.Error(err, fmt.Sprintf("failed to convert query param to bson.Doc, because of %v", err)) + s.logger.Error(err, fmt.Sprintf("failed to convert query param to bson.Doc, because of %v", err)) return 0, err } docsNum, err := s.client.Database(s.metadata.dbName).Collection(s.metadata.collection).CountDocuments(ctx, filter) if err != nil { - mongoDBLog.Error(err, fmt.Sprintf("failed to query %v in %v, because of %v", 
s.metadata.dbName, s.metadata.collection, err)) + s.logger.Error(err, fmt.Sprintf("failed to query %v in %v, because of %v", s.metadata.dbName, s.metadata.collection, err)) return 0, err } diff --git a/pkg/scalers/mongo_scaler_test.go b/pkg/scalers/mongo_scaler_test.go index 57117cb1437..163ef58e1c4 100644 --- a/pkg/scalers/mongo_scaler_test.go +++ b/pkg/scalers/mongo_scaler_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr" "go.mongodb.org/mongo-driver/mongo" ) @@ -86,7 +87,7 @@ func TestMongoDBGetMetricSpecForScaling(t *testing.T) { if err != nil { t.Fatal("Could not parse metadata:", err) } - mockMongoDBScaler := mongoDBScaler{"", meta, &mongo.Client{}} + mockMongoDBScaler := mongoDBScaler{"", meta, &mongo.Client{}, logr.Logger{}} metricSpec := mockMongoDBScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/mssql_scaler.go b/pkg/scalers/mssql_scaler.go index f2d34773e07..48933ab861b 100644 --- a/pkg/scalers/mssql_scaler.go +++ b/pkg/scalers/mssql_scaler.go @@ -9,10 +9,10 @@ import ( // mssql driver required for this scaler _ "github.com/denisenkom/go-mssqldb" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -22,6 +22,7 @@ type mssqlScaler struct { metricType v2beta2.MetricTargetType metadata *mssqlMetadata connection *sql.DB + logger logr.Logger } // mssqlMetadata defines metadata used by KEDA to query a Microsoft SQL database @@ -62,8 +63,6 @@ type mssqlMetadata struct { scalerIndex int } -var mssqlLog = logf.Log.WithName("mssql_scaler") - // NewMSSQLScaler creates a new mssql scaler func NewMSSQLScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -71,12 +70,14 @@ func NewMSSQLScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } + logger := InitializeLogger(config, "mssql_scaler") + meta, err := parseMSSQLMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing mssql metadata: %s", err) } - conn, err := newMSSQLConnection(meta) + conn, err := newMSSQLConnection(meta, logger) if err != nil { return nil, fmt.Errorf("error establishing mssql connection: %s", err) } @@ -85,6 +86,7 @@ func NewMSSQLScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, connection: conn, + logger: logger, }, nil } @@ -175,18 +177,18 @@ func parseMSSQLMetadata(config *ScalerConfig) (*mssqlMetadata, error) { } // newMSSQLConnection returns a new, opened SQL connection for the provided mssqlMetadata -func newMSSQLConnection(meta *mssqlMetadata) (*sql.DB, error) { +func newMSSQLConnection(meta *mssqlMetadata, logger logr.Logger) (*sql.DB, error) { connStr := getMSSQLConnectionString(meta) db, err := sql.Open("sqlserver", connStr) if err != nil { - mssqlLog.Error(err, fmt.Sprintf("Found error opening mssql: %s", err)) + logger.Error(err, fmt.Sprintf("Found error opening mssql: %s", err)) return nil, err } err = db.Ping() if err != nil { - mssqlLog.Error(err, fmt.Sprintf("Found error pinging mssql: %s", err)) + logger.Error(err, fmt.Sprintf("Found error pinging mssql: %s", err)) return nil, err } @@ -262,7 +264,7 @@ func (s *mssqlScaler) getQueryResult(ctx context.Context) (float64, error) { case err == sql.ErrNoRows: value = 0 case err != nil: - mssqlLog.Error(err, 
fmt.Sprintf("Could not query mssql database: %s", err)) + s.logger.Error(err, fmt.Sprintf("Could not query mssql database: %s", err)) return 0, err } @@ -283,7 +285,7 @@ func (s *mssqlScaler) IsActive(ctx context.Context) (bool, error) { func (s *mssqlScaler) Close(context.Context) error { err := s.connection.Close() if err != nil { - mssqlLog.Error(err, "Error closing mssql connection") + s.logger.Error(err, "Error closing mssql connection") return err } diff --git a/pkg/scalers/mysql_scaler.go b/pkg/scalers/mysql_scaler.go index d7136837fec..40d9a52005a 100644 --- a/pkg/scalers/mysql_scaler.go +++ b/pkg/scalers/mysql_scaler.go @@ -7,11 +7,11 @@ import ( "strconv" "strings" + "github.com/go-logr/logr" "github.com/go-sql-driver/mysql" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -20,6 +20,7 @@ type mySQLScaler struct { metricType v2beta2.MetricTargetType metadata *mySQLMetadata connection *sql.DB + logger logr.Logger } type mySQLMetadata struct { @@ -35,8 +36,6 @@ type mySQLMetadata struct { metricName string } -var mySQLLog = logf.Log.WithName("mysql_scaler") - // NewMySQLScaler creates a new MySQL scaler func NewMySQLScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -44,12 +43,14 @@ func NewMySQLScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } + logger := InitializeLogger(config, "mysql_scaler") + meta, err := parseMySQLMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing MySQL metadata: %s", err) } - conn, err := newMySQLConnection(meta) + conn, err := newMySQLConnection(meta, logger) if err != nil { return nil, fmt.Errorf("error establishing MySQL connection: %s", err) } @@ -57,6 +58,7 @@ func NewMySQLScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, connection: conn, + logger: logger, }, nil } @@ -155,16 +157,16 @@ func metadataToConnectionStr(meta *mySQLMetadata) string { } // newMySQLConnection creates MySQL db connection -func newMySQLConnection(meta *mySQLMetadata) (*sql.DB, error) { +func newMySQLConnection(meta *mySQLMetadata, logger logr.Logger) (*sql.DB, error) { connStr := metadataToConnectionStr(meta) db, err := sql.Open("mysql", connStr) if err != nil { - mySQLLog.Error(err, fmt.Sprintf("Found error when opening connection: %s", err)) + logger.Error(err, fmt.Sprintf("Found error when opening connection: %s", err)) return nil, err } err = db.Ping() if err != nil { - mySQLLog.Error(err, fmt.Sprintf("Found error when pinging database: %s", err)) + logger.Error(err, fmt.Sprintf("Found error when pinging database: %s", err)) return nil, err } return db, nil @@ -185,7 +187,7 @@ func parseMySQLDbNameFromConnectionStr(connectionString string) string { func (s *mySQLScaler) Close(context.Context) error { err := s.connection.Close() if err != nil { - mySQLLog.Error(err, "Error closing MySQL connection") + s.logger.Error(err, "Error closing MySQL connection") return err } return nil @@ -195,7 +197,7 @@ func (s *mySQLScaler) Close(context.Context) error { func (s *mySQLScaler) IsActive(ctx context.Context) (bool, error) { messages, err := s.getQueryResult(ctx) if err != nil { - mySQLLog.Error(err, fmt.Sprintf("Error inspecting MySQL: %s", err)) + s.logger.Error(err, fmt.Sprintf("Error inspecting MySQL: %s", err)) return false, err } return messages > 
s.metadata.activationQueryValue, nil @@ -206,7 +208,7 @@ func (s *mySQLScaler) getQueryResult(ctx context.Context) (float64, error) { var value float64 err := s.connection.QueryRowContext(ctx, s.metadata.query).Scan(&value) if err != nil { - mySQLLog.Error(err, fmt.Sprintf("Could not query MySQL database: %s", err)) + s.logger.Error(err, fmt.Sprintf("Could not query MySQL database: %s", err)) return 0, err } return value, nil diff --git a/pkg/scalers/newrelic_scaler.go b/pkg/scalers/newrelic_scaler.go index e5254250d7e..48e42c2ba59 100644 --- a/pkg/scalers/newrelic_scaler.go +++ b/pkg/scalers/newrelic_scaler.go @@ -6,12 +6,12 @@ import ( "log" "strconv" + "github.com/go-logr/logr" "github.com/newrelic/newrelic-client-go/newrelic" "github.com/newrelic/newrelic-client-go/pkg/nrdb" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -30,6 +30,7 @@ type newrelicScaler struct { metricType v2beta2.MetricTargetType metadata *newrelicMetadata nrClient *newrelic.NewRelic + logger logr.Logger } type newrelicMetadata struct { @@ -42,15 +43,15 @@ type newrelicMetadata struct { scalerIndex int } -var newrelicLog = logf.Log.WithName(fmt.Sprintf("%s_scaler", scalerName)) - func NewNewRelicScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) if err != nil { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - meta, err := parseNewRelicMetadata(config) + logger := InitializeLogger(config, fmt.Sprintf("%s_scaler", scalerName)) + + meta, err := parseNewRelicMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing %s metadata: %s", scalerName, err) } @@ -65,7 +66,7 @@ func NewNewRelicScaler(config *ScalerConfig) (Scaler, error) { logMsg := fmt.Sprintf("Initializing New Relic Scaler (account %d in region %s)", meta.account, meta.region) - newrelicLog.Info(logMsg) + logger.Info(logMsg) return &newrelicScaler{ metricType: metricType, @@ -73,7 +74,7 @@ func NewNewRelicScaler(config *ScalerConfig) (Scaler, error) { nrClient: nrClient}, nil } -func parseNewRelicMetadata(config *ScalerConfig) (*newrelicMetadata, error) { +func parseNewRelicMetadata(config *ScalerConfig, logger logr.Logger) (*newrelicMetadata, error) { meta := newrelicMetadata{} var err error @@ -102,7 +103,7 @@ func parseNewRelicMetadata(config *ScalerConfig) (*newrelicMetadata, error) { meta.region, err = GetFromAuthOrMeta(config, region) if err != nil { meta.region = "US" - newrelicLog.Info("Using default 'US' region") + logger.Info("Using default 'US' region") } if val, ok := config.TriggerMetadata[threshold]; ok && val != "" { @@ -133,7 +134,7 @@ func parseNewRelicMetadata(config *ScalerConfig) (*newrelicMetadata, error) { func (s *newrelicScaler) IsActive(ctx context.Context) (bool, error) { val, err := s.executeNewRelicQuery(ctx) if err != nil { - newrelicLog.Error(err, "error executing NRQL") + s.logger.Error(err, "error executing NRQL") return false, err } return val > 0, nil @@ -165,7 +166,7 @@ func (s *newrelicScaler) executeNewRelicQuery(ctx context.Context) (float64, err func (s *newrelicScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { val, err := s.executeNewRelicQuery(ctx) if err != nil { - newrelicLog.Error(err, "error executing NRQL query") + s.logger.Error(err, "error executing NRQL query") return 
[]external_metrics.ExternalMetricValue{}, err } diff --git a/pkg/scalers/newrelic_scaler_test.go b/pkg/scalers/newrelic_scaler_test.go index cabd142280c..0e10676f169 100644 --- a/pkg/scalers/newrelic_scaler_test.go +++ b/pkg/scalers/newrelic_scaler_test.go @@ -4,6 +4,8 @@ import ( "context" "fmt" "testing" + + "github.com/go-logr/logr" ) type parseNewRelicMetadataTestData struct { @@ -56,7 +58,7 @@ var newrelicMetricIdentifiers = []newrelicMetricIdentifier{ func TestNewRelicParseMetadata(t *testing.T) { for _, testData := range testNewRelicMetadata { - _, err := parseNewRelicMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}) + _, err := parseNewRelicMetadata(&ScalerConfig{TriggerMetadata: testData.metadata, AuthParams: testData.authParams}, logr.Logger{}) if err != nil && !testData.isError { fmt.Printf("X: %s", testData.metadata) t.Error("Expected success but got error", err) @@ -69,7 +71,7 @@ func TestNewRelicParseMetadata(t *testing.T) { } func TestNewRelicGetMetricSpecForScaling(t *testing.T) { for _, testData := range newrelicMetricIdentifiers { - meta, err := parseNewRelicMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}) + meta, err := parseNewRelicMetadata(&ScalerConfig{TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.metadataTestData.authParams, ScalerIndex: testData.scalerIndex}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata:", err) } diff --git a/pkg/scalers/openstack_metrics_scaler.go b/pkg/scalers/openstack_metrics_scaler.go index 836f1d93d9d..7e9a5767a15 100644 --- a/pkg/scalers/openstack_metrics_scaler.go +++ b/pkg/scalers/openstack_metrics_scaler.go @@ -11,10 +11,10 @@ import ( "strconv" "time" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/kedacore/keda/v2/pkg/scalers/openstack" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -50,6 +50,7 @@ type openstackMetricScaler struct { metricType v2beta2.MetricTargetType metadata *openstackMetricMetadata metricClient openstack.Client + logger logr.Logger } type measureResult struct { @@ -58,8 +59,6 @@ type measureResult struct { /* end of declarations */ -var openstackMetricLog = logf.Log.WithName("openstack_metric_scaler") - // NewOpenstackMetricScaler creates new openstack metrics scaler instance func NewOpenstackMetricScaler(ctx context.Context, config *ScalerConfig) (Scaler, error) { var keystoneAuth *openstack.KeystoneAuthRequest @@ -70,7 +69,9 @@ func NewOpenstackMetricScaler(ctx context.Context, config *ScalerConfig) (Scaler return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - openstackMetricMetadata, err := parseOpenstackMetricMetadata(config) + logger := InitializeLogger(config, "openstack_metric_scaler") + + openstackMetricMetadata, err := parseOpenstackMetricMetadata(config, logger) if err != nil { return nil, fmt.Errorf("error parsing openstack Metric metadata: %s", err) @@ -104,7 +105,7 @@ func NewOpenstackMetricScaler(ctx context.Context, config *ScalerConfig) (Scaler metricsClient, err = keystoneAuth.RequestClient(ctx) if err != nil { - openstackMetricLog.Error(err, "Fail to retrieve new keystone clinet for openstack metrics scaler") + logger.Error(err, "Failed to retrieve new keystone client for openstack metrics scaler") return nil, err } @@ -112,38
+113,39 @@ func NewOpenstackMetricScaler(ctx context.Context, config *ScalerConfig) (Scaler metricType: metricType, metadata: openstackMetricMetadata, metricClient: metricsClient, + logger: logger, }, nil } -func parseOpenstackMetricMetadata(config *ScalerConfig) (*openstackMetricMetadata, error) { +func parseOpenstackMetricMetadata(config *ScalerConfig, logger logr.Logger) (*openstackMetricMetadata, error) { meta := openstackMetricMetadata{} triggerMetadata := config.TriggerMetadata if val, ok := triggerMetadata["metricsURL"]; ok && val != "" { meta.metricsURL = val } else { - openstackMetricLog.Error(fmt.Errorf("no metrics url could be read"), "Error readig metricsURL") + logger.Error(fmt.Errorf("no metrics url could be read"), "Error reading metricsURL") return nil, fmt.Errorf("no metrics url was declared") } if val, ok := triggerMetadata["metricID"]; ok && val != "" { meta.metricID = val } else { - openstackMetricLog.Error(fmt.Errorf("no metric id could be read"), "Error reading metricID") + logger.Error(fmt.Errorf("no metric id could be read"), "Error reading metricID") return nil, fmt.Errorf("no metric id was declared") } if val, ok := triggerMetadata["aggregationMethod"]; ok && val != "" { meta.aggregationMethod = val } else { - openstackMetricLog.Error(fmt.Errorf("no aggregation method could be read"), "Error reading aggregation method") + logger.Error(fmt.Errorf("no aggregation method could be read"), "Error reading aggregation method") return nil, fmt.Errorf("no aggregation method could be read") } if val, ok := triggerMetadata["granularity"]; ok && val != "" { granularity, err := strconv.Atoi(val) if err != nil { - openstackMetricLog.Error(err, "Error converting granulality information %s", err.Error) + logger.Error(err, "Error converting granularity information %s", err.Error) return nil, err } meta.granularity = granularity @@ -155,7 +157,7 @@ func parseOpenstackMetricMetadat // converts the string to float64 but its value is convertible to float32 without changing _threshold, err := strconv.ParseFloat(val, 32) if err != nil { - openstackMetricLog.Error(err, "error parsing openstack metric metadata", "threshold", "threshold") + logger.Error(err, "error parsing openstack metric metadata", "threshold", "threshold") return nil, fmt.Errorf("error parsing openstack metric metadata : %s", err.Error()) } @@ -166,7 +168,7 @@ func parseOpenstackMetricMetadat // converts the string to float64 but its value is convertible to float32 without changing activationThreshold, err := strconv.ParseFloat(val, 32) if err != nil { - openstackMetricLog.Error(err, "error parsing openstack metric metadata", "activationThreshold", "activationThreshold") + logger.Error(err, "error parsing openstack metric metadata", "activationThreshold", "activationThreshold") return nil, fmt.Errorf("error parsing openstack metric metadata : %s", err.Error()) } @@ -213,14 +215,14 @@ func parseOpenstackMetricAuthenticationMetadata(config *ScalerConfig) (openstack return authMeta, nil } -func (a *openstackMetricScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec { - metricName := kedautil.NormalizeString(fmt.Sprintf("openstack-metric-%s", a.metadata.metricID)) +func (s *openstackMetricScaler) GetMetricSpecForScaling(context.Context) []v2beta2.MetricSpec { + metricName := kedautil.NormalizeString(fmt.Sprintf("openstack-metric-%s", s.metadata.metricID)) externalMetric := &v2beta2.ExternalMetricSource{ Metric:
v2beta2.MetricIdentifier{ - Name: GenerateMetricNameWithIndex(a.metadata.scalerIndex, metricName), + Name: GenerateMetricNameWithIndex(s.metadata.scalerIndex, metricName), }, - Target: GetMetricTargetMili(a.metricType, a.metadata.threshold), + Target: GetMetricTargetMili(s.metricType, s.metadata.threshold), } metricSpec := v2beta2.MetricSpec{ @@ -231,11 +233,11 @@ func (a *openstackMetricScaler) GetMetricSpecForScaling(context.Context) []v2bet return []v2beta2.MetricSpec{metricSpec} } -func (a *openstackMetricScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { - val, err := a.readOpenstackMetrics(ctx) +func (s *openstackMetricScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) { + val, err := s.readOpenstackMetrics(ctx) if err != nil { - openstackMetricLog.Error(err, "Error collecting metric value") + s.logger.Error(err, "Error collecting metric value") return []external_metrics.ExternalMetricValue{}, err } @@ -244,63 +246,63 @@ func (a *openstackMetricScaler) GetMetrics(ctx context.Context, metricName strin return append([]external_metrics.ExternalMetricValue{}, metric), nil } -func (a *openstackMetricScaler) IsActive(ctx context.Context) (bool, error) { - val, err := a.readOpenstackMetrics(ctx) +func (s *openstackMetricScaler) IsActive(ctx context.Context) (bool, error) { + val, err := s.readOpenstackMetrics(ctx) if err != nil { return false, err } - return val > a.metadata.activationThreshold, nil + return val > s.metadata.activationThreshold, nil } -func (a *openstackMetricScaler) Close(context.Context) error { +func (s *openstackMetricScaler) Close(context.Context) error { return nil } // Gets measureament from API as float64, converts it to int and return the value. 
-func (a *openstackMetricScaler) readOpenstackMetrics(ctx context.Context) (float64, error) { - var metricURL = a.metadata.metricsURL +func (s *openstackMetricScaler) readOpenstackMetrics(ctx context.Context) (float64, error) { + var metricURL = s.metadata.metricsURL - isValid, validationError := a.metricClient.IsTokenValid(ctx) + isValid, validationError := s.metricClient.IsTokenValid(ctx) if validationError != nil { - openstackMetricLog.Error(validationError, "Unable to check token validity.") + s.logger.Error(validationError, "Unable to check token validity.") return 0, validationError } if !isValid { - tokenRequestError := a.metricClient.RenewToken(ctx) + tokenRequestError := s.metricClient.RenewToken(ctx) if tokenRequestError != nil { - openstackMetricLog.Error(tokenRequestError, "The token being used is invalid") + s.logger.Error(tokenRequestError, "The token being used is invalid") return defaultValueWhenError, tokenRequestError } } - token := a.metricClient.Token + token := s.metricClient.Token openstackMetricsURL, err := url.Parse(metricURL) if err != nil { - openstackMetricLog.Error(err, "metric url provided is invalid") + s.logger.Error(err, "metric url provided is invalid") return defaultValueWhenError, fmt.Errorf("metric url is invalid: %s", err.Error()) } - openstackMetricsURL.Path = path.Join(openstackMetricsURL.Path, a.metadata.metricID+"/measures") + openstackMetricsURL.Path = path.Join(openstackMetricsURL.Path, s.metadata.metricID+"/measures") queryParameter := openstackMetricsURL.Query() granularity := 0 // We start with granularity with value 2 cause gnocchi APIm which is used by openstack, consider a time window, and we want to get the last value - if a.metadata.granularity <= 0 { - openstackMetricLog.Error(fmt.Errorf("granularity value is less than 1"), "Minimum accepatble value expected for ganularity is 1.") + if s.metadata.granularity <= 0 { + s.logger.Error(fmt.Errorf("granularity value is less than 1"), "Minimum acceptable value expected for granularity is 1.") return defaultValueWhenError, fmt.Errorf("granularity value is less than 1") } - if (a.metadata.granularity / 60) > 0 { - granularity = (a.metadata.granularity / 60) - 1 + if (s.metadata.granularity / 60) > 0 { + granularity = (s.metadata.granularity / 60) - 1 } - queryParameter.Set("granularity", strconv.Itoa(a.metadata.granularity)) - queryParameter.Set("aggregation", a.metadata.aggregationMethod) + queryParameter.Set("granularity", strconv.Itoa(s.metadata.granularity)) + queryParameter.Set("aggregation", s.metadata.aggregationMethod) var currTimeWithWindow string @@ -316,14 +318,14 @@ func (a *openstackMetricScaler) readOpenstackMetrics(ctx context.Context) (float openstackMetricRequest, newReqErr := http.NewRequestWithContext(ctx, "GET", openstackMetricsURL.String(), nil) if newReqErr != nil { - openstackMetricLog.Error(newReqErr, "Could not build metrics request", nil) + s.logger.Error(newReqErr, "Could not build metrics request", nil) } openstackMetricRequest.Header.Set("X-Auth-Token", token) - resp, requestError := a.metricClient.HTTPClient.Do(openstackMetricRequest) + resp, requestError := s.metricClient.HTTPClient.Do(openstackMetricRequest) if requestError != nil { - openstackMetricLog.Error(requestError, "Unable to request Metrics from URL: %s.", a.metadata.metricsURL) + s.logger.Error(requestError, "Unable to request Metrics from URL: %s.", s.metadata.metricsURL) return defaultValueWhenError, requestError } @@ -332,7 +334,7 @@ func (a *openstackMetricScaler) readOpenstackMetrics(ctx
context.Context) (float bodyError, readError := ioutil.ReadAll(resp.Body) if readError != nil { - openstackMetricLog.Error(readError, "Request failed with code: %s for URL: %s", resp.StatusCode, a.metadata.metricsURL) + s.logger.Error(readError, "Request failed with code: %s for URL: %s", resp.StatusCode, s.metadata.metricsURL) return defaultValueWhenError, readError } @@ -343,7 +345,7 @@ func (a *openstackMetricScaler) readOpenstackMetrics(ctx context.Context) (float body, errConvertJSON := ioutil.ReadAll(resp.Body) if errConvertJSON != nil { - openstackMetricLog.Error(errConvertJSON, "Failed to convert Body format response to json") + s.logger.Error(errConvertJSON, "Failed to convert Body format response to json") return defaultValueWhenError, err } @@ -354,7 +356,7 @@ func (a *openstackMetricScaler) readOpenstackMetrics(ctx context.Context) (float errUnMarshall := json.Unmarshal(body, &m.measures) if errUnMarshall != nil { - openstackMetricLog.Error(errUnMarshall, "Failed converting json format Body structure.") + s.logger.Error(errUnMarshall, "Failed converting json format Body structure.") return defaultValueWhenError, errUnMarshall } @@ -363,12 +365,12 @@ func (a *openstackMetricScaler) readOpenstackMetrics(ctx context.Context) (float if len(m.measures) > 0 { targetMeasure = m.measures[len(m.measures)-1] } else { - openstackMetricLog.Info("No measure was returned from openstack") + s.logger.Info("No measure was returned from openstack") return defaultValueWhenError, nil } if len(targetMeasure) != 3 { - openstackMetricLog.Error(fmt.Errorf("unexpected json response"), "unexpected json tuple, expected structure is [string, float, float]") + s.logger.Error(fmt.Errorf("unexpected json response"), "unexpected json tuple, expected structure is [string, float, float]") return defaultValueWhenError, fmt.Errorf("unexpected json response") } @@ -376,6 +378,6 @@ func (a *openstackMetricScaler) readOpenstackMetrics(ctx context.Context) (float return val, nil } - openstackMetricLog.Error(fmt.Errorf("failed to convert interface type to float64"), "unable to convert target measure to expected format float64") + s.logger.Error(fmt.Errorf("failed to convert interface type to float64"), "unable to convert target measure to expected format float64") return defaultValueWhenError, fmt.Errorf("failed to convert interface type to float64") } diff --git a/pkg/scalers/openstack_metrics_scaler_test.go b/pkg/scalers/openstack_metrics_scaler_test.go index ee5a217d7ad..615e48f73d9 100644 --- a/pkg/scalers/openstack_metrics_scaler_test.go +++ b/pkg/scalers/openstack_metrics_scaler_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr" "github.com/stretchr/testify/assert" "github.com/kedacore/keda/v2/pkg/scalers/openstack" @@ -106,7 +107,7 @@ func TestOpenstackMetricsGetMetricsForSpecScaling(t *testing.T) { for _, testData := range testCases { testData := testData - meta, err := parseOpenstackMetricMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scalerIndex}) + meta, err := parseOpenstackMetricMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scalerIndex}, logr.Logger{}) if err != nil { t.Fatal("Could not parse metadata from openstack metrics scaler") @@ -118,7 +119,7 @@ func 
TestOpenstackMetricsGetMetricsForSpecScaling(t *testing.T) { t.Fatal("could not parse openstack metric authentication metadata") } - mockMetricsScaler := openstackMetricScaler{"", meta, openstack.Client{}} + mockMetricsScaler := openstackMetricScaler{"", meta, openstack.Client{}, logr.Logger{}} metricsSpec := mockMetricsScaler.GetMetricSpecForScaling(context.Background()) metricName := metricsSpec[0].External.Metric.Name @@ -144,7 +145,7 @@ func TestOpenstackMetricsGetMetricsForSpecScalingInvalidMetaData(t *testing.T) { for _, testData := range testCases { testData := testData t.Run(testData.name, func(pt *testing.T) { - _, err := parseOpenstackMetricMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scalerIndex}) + _, err := parseOpenstackMetricMetadata(&ScalerConfig{ResolvedEnv: testData.resolvedEnv, TriggerMetadata: testData.metadataTestData.metadata, AuthParams: testData.authMetadataTestData.authMetadata, ScalerIndex: testData.scalerIndex}, logr.Logger{}) assert.NotNil(t, err) }) } diff --git a/pkg/scalers/openstack_swift_scaler.go b/pkg/scalers/openstack_swift_scaler.go index 181f119dbfa..ada62de1836 100644 --- a/pkg/scalers/openstack_swift_scaler.go +++ b/pkg/scalers/openstack_swift_scaler.go @@ -10,10 +10,10 @@ import ( "strconv" "strings" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/kedacore/keda/v2/pkg/scalers/openstack" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -56,10 +56,9 @@ type openstackSwiftScaler struct { metricType v2beta2.MetricTargetType metadata *openstackSwiftMetadata swiftClient openstack.Client + logger logr.Logger } -var openstackSwiftLog = logf.Log.WithName("openstack_swift_scaler") - func (s *openstackSwiftScaler) getOpenstackSwiftContainerObjectCount(ctx context.Context) (int64, error) { var containerName = s.metadata.containerName var swiftURL = s.metadata.swiftURL @@ -67,7 +66,7 @@ func (s *openstackSwiftScaler) getOpenstackSwiftContainerObjectCount(ctx context isValid, err := s.swiftClient.IsTokenValid(ctx) if err != nil { - openstackSwiftLog.Error(err, "scaler could not validate the token for authentication") + s.logger.Error(err, "scaler could not validate the token for authentication") return 0, err } @@ -75,7 +74,7 @@ func (s *openstackSwiftScaler) getOpenstackSwiftContainerObjectCount(ctx context err := s.swiftClient.RenewToken(ctx) if err != nil { - openstackSwiftLog.Error(err, "error requesting token for authentication") + s.logger.Error(err, "error requesting token for authentication") return 0, err } } @@ -85,7 +84,7 @@ func (s *openstackSwiftScaler) getOpenstackSwiftContainerObjectCount(ctx context swiftContainerURL, err := url.Parse(swiftURL) if err != nil { - openstackSwiftLog.Error(err, fmt.Sprintf("the swiftURL is invalid: %s. You might have forgotten to provide the either 'http' or 'https' in the URL. Check our documentation to see if you missed something", swiftURL)) + s.logger.Error(err, fmt.Sprintf("the swiftURL is invalid: %s. You might have forgotten to provide either 'http' or 'https' in the URL.
Check our documentation to see if you missed something", swiftURL)) return 0, fmt.Errorf("the swiftURL is invalid: %s", err.Error()) } @@ -109,7 +108,7 @@ func (s *openstackSwiftScaler) getOpenstackSwiftContainerObjectCount(ctx context resp, requestError := s.swiftClient.HTTPClient.Do(swiftRequest) if requestError != nil { - openstackSwiftLog.Error(requestError, fmt.Sprintf("error getting metrics for container '%s'. You probably specified the wrong swift URL or the URL is not reachable", containerName)) + s.logger.Error(requestError, fmt.Sprintf("error getting metrics for container '%s'. You probably specified the wrong swift URL or the URL is not reachable", containerName)) return 0, requestError } @@ -118,7 +117,7 @@ func (s *openstackSwiftScaler) getOpenstackSwiftContainerObjectCount(ctx context body, readError := ioutil.ReadAll(resp.Body) if readError != nil { - openstackSwiftLog.Error(readError, "could not read response body from Swift API") + s.logger.Error(readError, "could not read response body from Swift API") return 0, readError } if resp.StatusCode >= http.StatusOK && resp.StatusCode < http.StatusMultipleChoices { @@ -137,7 +136,7 @@ func (s *openstackSwiftScaler) getOpenstackSwiftContainerObjectCount(ctx context objectLimit, conversionError := strconv.ParseInt(s.metadata.objectLimit, 10, 64) if conversionError != nil { - openstackSwiftLog.Error(err, fmt.Sprintf("the objectLimit value provided is invalid: %v", s.metadata.objectLimit)) + s.logger.Error(conversionError, fmt.Sprintf("the objectLimit value provided is invalid: %v", s.metadata.objectLimit)) return 0, conversionError } @@ -160,17 +159,17 @@ } if resp.StatusCode == http.StatusUnauthorized { - openstackSwiftLog.Error(nil, "the retrieved token is not a valid token. Provide the correct auth credentials so the scaler can retrieve a valid access token (Unauthorized)") + s.logger.Error(nil, "the retrieved token is not a valid token. Provide the correct auth credentials so the scaler can retrieve a valid access token (Unauthorized)") return 0, fmt.Errorf("the retrieved token is not a valid token.
Provide the correct auth credentials so the scaler can retrieve a valid access token (Unauthorized)") } if resp.StatusCode == http.StatusForbidden { - openstackSwiftLog.Error(nil, "the retrieved token is a valid token, but it does not have sufficient permission to retrieve Swift and/or container metadata (Forbidden)") + s.logger.Error(nil, "the retrieved token is a valid token, but it does not have sufficient permission to retrieve Swift and/or container metadata (Forbidden)") return 0, fmt.Errorf("the retrieved token is a valid token, but it does not have sufficient permission to retrieve Swift and/or container metadata (Forbidden)") } if resp.StatusCode == http.StatusNotFound { - openstackSwiftLog.Error(nil, fmt.Sprintf("the container '%s' does not exist (Not Found)", containerName)) + s.logger.Error(nil, fmt.Sprintf("the container '%s' does not exist (Not Found)", containerName)) return 0, fmt.Errorf("the container '%s' does not exist (Not Found)", containerName) } @@ -188,6 +187,8 @@ func NewOpenstackSwiftScaler(ctx context.Context, config *ScalerConfig) (Scaler, return nil, fmt.Errorf("error getting scaler metric type: %s", err) } + logger := InitializeLogger(config, "openstack_swift_scaler") + openstackSwiftMetadata, err := parseOpenstackSwiftMetadata(config) if err != nil { @@ -242,6 +243,7 @@ func NewOpenstackSwiftScaler(ctx context.Context, config *ScalerConfig) (Scaler, metricType: metricType, metadata: openstackSwiftMetadata, swiftClient: swiftClient, + logger: logger, }, nil } @@ -385,7 +387,7 @@ func (s *openstackSwiftScaler) GetMetrics(ctx context.Context, metricName string objectCount, err := s.getOpenstackSwiftContainerObjectCount(ctx) if err != nil { - openstackSwiftLog.Error(err, "error getting objectCount") + s.logger.Error(err, "error getting objectCount") return []external_metrics.ExternalMetricValue{}, err } diff --git a/pkg/scalers/openstack_swift_scaler_test.go b/pkg/scalers/openstack_swift_scaler_test.go index 6cf66213a68..de3431c8c2e 100644 --- a/pkg/scalers/openstack_swift_scaler_test.go +++ b/pkg/scalers/openstack_swift_scaler_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/go-logr/logr" "github.com/stretchr/testify/assert" "github.com/kedacore/keda/v2/pkg/scalers/openstack" @@ -112,7 +113,7 @@ func TestOpenstackSwiftGetMetricSpecForScaling(t *testing.T) { t.Fatal("Could not parse auth metadata:", err) } - mockSwiftScaler := openstackSwiftScaler{"", meta, openstack.Client{}} + mockSwiftScaler := openstackSwiftScaler{"", meta, openstack.Client{}, logr.Logger{}} metricSpec := mockSwiftScaler.GetMetricSpecForScaling(context.Background()) diff --git a/pkg/scalers/postgresql_scaler.go b/pkg/scalers/postgresql_scaler.go index fef0c33ea9a..138bd6ed9ba 100644 --- a/pkg/scalers/postgresql_scaler.go +++ b/pkg/scalers/postgresql_scaler.go @@ -6,12 +6,12 @@ import ( "fmt" "strconv" + "github.com/go-logr/logr" // PostreSQL drive required for this scaler _ "github.com/lib/pq" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -20,6 +20,7 @@ type postgreSQLScaler struct { metricType v2beta2.MetricTargetType metadata *postgreSQLMetadata connection *sql.DB + logger logr.Logger } type postgreSQLMetadata struct { @@ -31,8 +32,6 @@ type postgreSQLMetadata struct { scalerIndex int } -var postgreSQLLog = logf.Log.WithName("postgreSQL_scaler") - // NewPostgreSQLScaler creates a new postgreSQL scaler func 
NewPostgreSQLScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -40,12 +39,14 @@ func NewPostgreSQLScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } + logger := InitializeLogger(config, "postgresql_scaler") + meta, err := parsePostgreSQLMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing postgreSQL metadata: %s", err) } - conn, err := getConnection(meta) + conn, err := getConnection(meta, logger) if err != nil { return nil, fmt.Errorf("error establishing postgreSQL connection: %s", err) } @@ -53,6 +54,7 @@ func NewPostgreSQLScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, connection: conn, + logger: logger, }, nil } @@ -142,15 +144,15 @@ func parsePostgreSQLMetadata(config *ScalerConfig) (*postgreSQLMetadata, error) return &meta, nil } -func getConnection(meta *postgreSQLMetadata) (*sql.DB, error) { +func getConnection(meta *postgreSQLMetadata, logger logr.Logger) (*sql.DB, error) { db, err := sql.Open("postgres", meta.connection) if err != nil { - postgreSQLLog.Error(err, fmt.Sprintf("Found error opening postgreSQL: %s", err)) + logger.Error(err, fmt.Sprintf("Found error opening postgreSQL: %s", err)) return nil, err } err = db.Ping() if err != nil { - postgreSQLLog.Error(err, fmt.Sprintf("Found error pinging postgreSQL: %s", err)) + logger.Error(err, fmt.Sprintf("Found error pinging postgreSQL: %s", err)) return nil, err } return db, nil @@ -160,7 +162,7 @@ func getConnection(meta *postgreSQLMetadata) (*sql.DB, error) { func (s *postgreSQLScaler) Close(context.Context) error { err := s.connection.Close() if err != nil { - postgreSQLLog.Error(err, "Error closing postgreSQL connection") + s.logger.Error(err, "Error closing postgreSQL connection") return err } return nil @@ -180,7 +182,7 @@ func (s *postgreSQLScaler) getActiveNumber(ctx context.Context) (float64, error) var id float64 err := s.connection.QueryRowContext(ctx, s.metadata.query).Scan(&id) if err != nil { - postgreSQLLog.Error(err, fmt.Sprintf("could not query postgreSQL: %s", err)) + s.logger.Error(err, fmt.Sprintf("could not query postgreSQL: %s", err)) return 0, fmt.Errorf("could not query postgreSQL: %s", err) } return id, nil diff --git a/pkg/scalers/postgresql_scaler_test.go b/pkg/scalers/postgresql_scaler_test.go index 0eaba6b7092..5fb14426d09 100644 --- a/pkg/scalers/postgresql_scaler_test.go +++ b/pkg/scalers/postgresql_scaler_test.go @@ -3,6 +3,8 @@ package scalers import ( "context" "testing" + + "github.com/go-logr/logr" ) type parsePostgreSQLMetadataTestData struct { @@ -45,7 +47,7 @@ func TestPosgresSQLGetMetricSpecForScaling(t *testing.T) { if err != nil { t.Fatal("Could not parse metadata:", err) } - mockPostgresSQLScaler := postgreSQLScaler{"", meta, nil} + mockPostgresSQLScaler := postgreSQLScaler{"", meta, nil, logr.Logger{}} metricSpec := mockPostgresSQLScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/predictkube_scaler.go b/pkg/scalers/predictkube_scaler.go index 0336dbe0173..4f25bfe7388 100644 --- a/pkg/scalers/predictkube_scaler.go +++ b/pkg/scalers/predictkube_scaler.go @@ -15,6 +15,7 @@ import ( tc "github.com/dysnix/predictkube-libs/external/types_convertation" "github.com/dysnix/predictkube-proto/external/proto/commonproto" pb "github.com/dysnix/predictkube-proto/external/proto/services" + "github.com/go-logr/logr" "github.com/go-playground/validator/v10" 
"github.com/prometheus/client_golang/api" v1 "github.com/prometheus/client_golang/api/prometheus/v1" @@ -28,7 +29,6 @@ import ( "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/kedacore/keda/v2/pkg/scalers/authentication" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -81,6 +81,7 @@ type PredictKubeScaler struct { grpcClient pb.MlEngineServiceClient healthClient health.HealthClient api v1.API + logger logr.Logger } type predictKubeMetadata struct { @@ -96,8 +97,6 @@ type predictKubeMetadata struct { scalerIndex int } -var predictKubeLog = logf.Log.WithName("predictkube_scaler") - func (s *PredictKubeScaler) setupClientConn() error { clientOpt, err := pc.SetGrpcClientOptions(grpcConf, &libs.Base{ @@ -141,17 +140,18 @@ func (s *PredictKubeScaler) setupClientConn() error { func NewPredictKubeScaler(ctx context.Context, config *ScalerConfig) (*PredictKubeScaler, error) { s := &PredictKubeScaler{} + logger := InitializeLogger(config, "predictkube_scaler") + metricType, err := GetMetricTargetType(config) if err != nil { - predictKubeLog.Error(err, "error getting scaler metric type") + logger.Error(err, "error getting scaler metric type") return nil, fmt.Errorf("error getting scaler metric type: %s", err) } - s.metricType = metricType meta, err := parsePredictKubeMetadata(config) if err != nil { - predictKubeLog.Error(err, "error parsing PredictKube metadata") + logger.Error(err, "error parsing PredictKube metadata") return nil, fmt.Errorf("error parsing PredictKube metadata: %3s", err) } @@ -159,13 +159,13 @@ func NewPredictKubeScaler(ctx context.Context, config *ScalerConfig) (*PredictKu err = s.initPredictKubePrometheusConn(ctx) if err != nil { - predictKubeLog.Error(err, "error create Prometheus client and API objects") + logger.Error(err, "error create Prometheus client and API objects") return nil, fmt.Errorf("error create Prometheus client and API objects: %3s", err) } err = s.setupClientConn() if err != nil { - predictKubeLog.Error(err, "error init GRPC client") + logger.Error(err, "error init GRPC client") return nil, fmt.Errorf("error init GRPC client: %3s", err) } @@ -222,17 +222,17 @@ func (s *PredictKubeScaler) GetMetricSpecForScaling(context.Context) []v2beta2.M func (s *PredictKubeScaler) GetMetrics(ctx context.Context, metricName string, _ labels.Selector) ([]external_metrics.ExternalMetricValue, error) { value, err := s.doPredictRequest(ctx) if err != nil { - predictKubeLog.Error(err, "error executing query to predict controller service") + s.logger.Error(err, "error executing query to predict controller service") return []external_metrics.ExternalMetricValue{}, err } if value == 0 { err = errors.New("empty response after predict request") - predictKubeLog.Error(err, "") + s.logger.Error(err, "") return nil, err } - predictKubeLog.V(1).Info(fmt.Sprintf("predict value is: %f", value)) + s.logger.V(1).Info(fmt.Sprintf("predict value is: %f", value)) metric := GenerateMetricInMili(metricName, value) @@ -285,7 +285,7 @@ func (s *PredictKubeScaler) doQuery(ctx context.Context) ([]*commonproto.Item, e val, warns, err := s.api.QueryRange(ctx, s.metadata.query, r) if len(warns) > 0 { - predictKubeLog.V(1).Info("warnings", warns) + s.logger.V(1).Info("warnings", warns) } if err != nil { @@ -473,7 +473,7 @@ func (s *PredictKubeScaler) initPredictKubePrometheusConn(ctx context.Context) ( authentication.FastHTTP, s.metadata.prometheusAuth, ); err != nil { - 
predictKubeLog.V(1).Error(err, "init Prometheus client http transport") + s.logger.V(1).Error(err, "init Prometheus client http transport") return err } @@ -481,7 +481,7 @@ func (s *PredictKubeScaler) initPredictKubePrometheusConn(ctx context.Context) ( Address: s.metadata.prometheusAddress, RoundTripper: roundTripper, }); err != nil { - predictKubeLog.V(1).Error(err, "init Prometheus client") + s.logger.V(1).Error(err, "init Prometheus client") return err } diff --git a/pkg/scalers/prometheus_scaler.go b/pkg/scalers/prometheus_scaler.go index a1eaf2375c4..0e98ffe5d83 100644 --- a/pkg/scalers/prometheus_scaler.go +++ b/pkg/scalers/prometheus_scaler.go @@ -10,10 +10,10 @@ import ( "strconv" "time" + "github.com/go-logr/logr" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" "github.com/kedacore/keda/v2/pkg/scalers/authentication" kedautil "github.com/kedacore/keda/v2/pkg/util" @@ -39,6 +39,7 @@ type prometheusScaler struct { metricType v2beta2.MetricTargetType metadata *prometheusMetadata httpClient *http.Client + logger logr.Logger } type prometheusMetadata struct { @@ -70,8 +71,6 @@ type promQueryResult struct { } `json:"data"` } -var prometheusLog = logf.Log.WithName("prometheus_scaler") - // NewPrometheusScaler creates a new prometheusScaler func NewPrometheusScaler(config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -79,6 +78,8 @@ func NewPrometheusScaler(config *ScalerConfig) (Scaler, error) { return nil, fmt.Errorf("error getting scaler metric type: %s", err) } + logger := InitializeLogger(config, "prometheus_scaler") + meta, err := parsePrometheusMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing prometheus metadata: %s", err) @@ -92,7 +93,7 @@ func NewPrometheusScaler(config *ScalerConfig) (Scaler, error) { authentication.NetHTTP, meta.prometheusAuth, ); err != nil { - predictKubeLog.V(1).Error(err, "init Prometheus client http transport") + logger.V(1).Error(err, "init Prometheus client http transport") return nil, err } } @@ -101,6 +102,7 @@ func NewPrometheusScaler(config *ScalerConfig) (Scaler, error) { metricType: metricType, metadata: meta, httpClient: httpClient, + logger: logger, }, nil } @@ -178,7 +180,7 @@ func parsePrometheusMetadata(config *ScalerConfig) (meta *prometheusMetadata, er func (s *prometheusScaler) IsActive(ctx context.Context) (bool, error) { val, err := s.ExecutePromQuery(ctx) if err != nil { - prometheusLog.Error(err, "error executing prometheus query") + s.logger.Error(err, "error executing prometheus query") return false, err } @@ -240,7 +242,9 @@ func (s *prometheusScaler) ExecutePromQuery(ctx context.Context) (float64, error _ = r.Body.Close() if !(r.StatusCode >= 200 && r.StatusCode <= 299) { - return -1, fmt.Errorf("prometheus query api returned error. status: %d response: %s", r.StatusCode, string(b)) + err := fmt.Errorf("prometheus query api returned error. 
status: %d response: %s", r.StatusCode, string(b)) + s.logger.Error(err, "prometheus query api returned error") + return -1, err } var result promQueryResult @@ -273,10 +277,10 @@ func (s *prometheusScaler) ExecutePromQuery(ctx context.Context) (float64, error val := result.Data.Result[0].Value[1] if val != nil { - s := val.(string) - v, err = strconv.ParseFloat(s, 64) + str := val.(string) + v, err = strconv.ParseFloat(str, 64) if err != nil { - prometheusLog.Error(err, "Error converting prometheus value", "prometheus_value", s) + s.logger.Error(err, "Error converting prometheus value", "prometheus_value", str) return -1, err } } @@ -287,7 +291,7 @@ func (s *prometheusScaler) ExecutePromQuery(ctx context.Context) (float64, error func (s *prometheusScaler) GetMetrics(ctx context.Context, metricName string, _ labels.Selector) ([]external_metrics.ExternalMetricValue, error) { val, err := s.ExecutePromQuery(ctx) if err != nil { - prometheusLog.Error(err, "error executing prometheus query") + s.logger.Error(err, "error executing prometheus query") return []external_metrics.ExternalMetricValue{}, err } diff --git a/pkg/scalers/rabbitmq_scaler.go b/pkg/scalers/rabbitmq_scaler.go index 5019d8c841c..1b9ad98c1fd 100644 --- a/pkg/scalers/rabbitmq_scaler.go +++ b/pkg/scalers/rabbitmq_scaler.go @@ -11,11 +11,11 @@ import ( "strconv" "time" + "github.com/go-logr/logr" "github.com/streadway/amqp" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -57,6 +57,7 @@ type rabbitMQScaler struct { connection *amqp.Connection channel *amqp.Channel httpClient *http.Client + logger logr.Logger } type rabbitMQMetadata struct { @@ -96,8 +97,6 @@ type publishDetail struct { Rate float64 `json:"rate"` } -var rabbitmqLog = logf.Log.WithName("rabbitmq_scaler") - // NewRabbitMQScaler creates a new rabbitMQ scaler func NewRabbitMQScaler(config *ScalerConfig) (Scaler, error) { s := &rabbitMQScaler{} @@ -108,6 +107,8 @@ func NewRabbitMQScaler(config *ScalerConfig) (Scaler, error) { } s.metricType = metricType + s.logger = InitializeLogger(config, "rabbitmq_scaler") + meta, err := parseRabbitMQMetadata(config) if err != nil { return nil, fmt.Errorf("error parsing rabbitmq metadata: %s", err) @@ -361,7 +362,7 @@ func (s *rabbitMQScaler) Close(context.Context) error { if s.connection != nil { err := s.connection.Close() if err != nil { - rabbitmqLog.Error(err, "Error closing rabbitmq connection") + s.logger.Error(err, "Error closing rabbitmq connection") return err } } diff --git a/pkg/scalers/redis_scaler.go b/pkg/scalers/redis_scaler.go index 8eafe2338f5..312bea606f9 100644 --- a/pkg/scalers/redis_scaler.go +++ b/pkg/scalers/redis_scaler.go @@ -7,11 +7,11 @@ import ( "strconv" "strings" + "github.com/go-logr/logr" "github.com/go-redis/redis/v8" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -29,6 +29,7 @@ type redisScaler struct { metadata *redisMetadata closeFn func() error getListLengthFn func(context.Context) (int64, error) + logger logr.Logger } type redisConnectionInfo struct { @@ -51,8 +52,6 @@ type redisMetadata struct { scalerIndex int } -var redisLog = logf.Log.WithName("redis_scaler") - // NewRedisScaler creates a new redisScaler func NewRedisScaler(ctx context.Context, isClustered, 
isSentinel bool, config *ScalerConfig) (Scaler, error) { luaScript := ` @@ -74,28 +73,30 @@ func NewRedisScaler(ctx context.Context, isClustered, isSentinel bool, config *S return nil, fmt.Errorf("error getting scaler metric type: %s", err) } + logger := InitializeLogger(config, "redis_scaler") + if isClustered { meta, err := parseRedisMetadata(config, parseRedisClusterAddress) if err != nil { return nil, fmt.Errorf("error parsing redis metadata: %s", err) } - return createClusteredRedisScaler(ctx, meta, luaScript, metricType) + return createClusteredRedisScaler(ctx, meta, luaScript, metricType, logger) } else if isSentinel { meta, err := parseRedisMetadata(config, parseRedisSentinelAddress) if err != nil { return nil, fmt.Errorf("error parsing redis metadata: %s", err) } - return createSentinelRedisScaler(ctx, meta, luaScript, metricType) + return createSentinelRedisScaler(ctx, meta, luaScript, metricType, logger) } meta, err := parseRedisMetadata(config, parseRedisAddress) if err != nil { return nil, fmt.Errorf("error parsing redis metadata: %s", err) } - return createRedisScaler(ctx, meta, luaScript, metricType) + return createRedisScaler(ctx, meta, luaScript, metricType, logger) } -func createClusteredRedisScaler(ctx context.Context, meta *redisMetadata, script string, metricType v2beta2.MetricTargetType) (Scaler, error) { +func createClusteredRedisScaler(ctx context.Context, meta *redisMetadata, script string, metricType v2beta2.MetricTargetType, logger logr.Logger) (Scaler, error) { client, err := getRedisClusterClient(ctx, meta.connectionInfo) if err != nil { return nil, fmt.Errorf("connection to redis cluster failed: %s", err) @@ -103,7 +104,7 @@ func createClusteredRedisScaler(ctx context.Context, meta *redisMetadata, script closeFn := func() error { if err := client.Close(); err != nil { - redisLog.Error(err, "error closing redis client") + logger.Error(err, "error closing redis client") return err } return nil @@ -123,31 +124,32 @@ func createClusteredRedisScaler(ctx context.Context, meta *redisMetadata, script metadata: meta, closeFn: closeFn, getListLengthFn: listLengthFn, + logger: logger, }, nil } -func createSentinelRedisScaler(ctx context.Context, meta *redisMetadata, script string, metricType v2beta2.MetricTargetType) (Scaler, error) { +func createSentinelRedisScaler(ctx context.Context, meta *redisMetadata, script string, metricType v2beta2.MetricTargetType, logger logr.Logger) (Scaler, error) { client, err := getRedisSentinelClient(ctx, meta.connectionInfo, meta.databaseIndex) if err != nil { return nil, fmt.Errorf("connection to redis sentinel failed: %s", err) } - return createRedisScalerWithClient(client, meta, script, metricType), nil + return createRedisScalerWithClient(client, meta, script, metricType, logger), nil } -func createRedisScaler(ctx context.Context, meta *redisMetadata, script string, metricType v2beta2.MetricTargetType) (Scaler, error) { +func createRedisScaler(ctx context.Context, meta *redisMetadata, script string, metricType v2beta2.MetricTargetType, logger logr.Logger) (Scaler, error) { client, err := getRedisClient(ctx, meta.connectionInfo, meta.databaseIndex) if err != nil { return nil, fmt.Errorf("connection to redis failed: %s", err) } - return createRedisScalerWithClient(client, meta, script, metricType), nil + return createRedisScalerWithClient(client, meta, script, metricType, logger), nil } -func createRedisScalerWithClient(client *redis.Client, meta *redisMetadata, script string, metricType v2beta2.MetricTargetType) Scaler { +func 
createRedisScalerWithClient(client *redis.Client, meta *redisMetadata, script string, metricType v2beta2.MetricTargetType, logger logr.Logger) Scaler { closeFn := func() error { if err := client.Close(); err != nil { - redisLog.Error(err, "error closing redis client") + logger.Error(err, "error closing redis client") return err } return nil @@ -211,7 +213,7 @@ func (s *redisScaler) IsActive(ctx context.Context) (bool, error) { length, err := s.getListLengthFn(ctx) if err != nil { - redisLog.Error(err, "error") + s.logger.Error(err, "error") return false, err } @@ -242,7 +244,7 @@ func (s *redisScaler) GetMetrics(ctx context.Context, metricName string, metricS listLen, err := s.getListLengthFn(ctx) if err != nil { - redisLog.Error(err, "error getting list length") + s.logger.Error(err, "error getting list length") return []external_metrics.ExternalMetricValue{}, err } diff --git a/pkg/scalers/redis_scaler_test.go b/pkg/scalers/redis_scaler_test.go index 2b27247f4f5..5d0174368a0 100644 --- a/pkg/scalers/redis_scaler_test.go +++ b/pkg/scalers/redis_scaler_test.go @@ -5,6 +5,7 @@ import ( "errors" "testing" + "github.com/go-logr/logr" "github.com/stretchr/testify/assert" ) @@ -88,6 +89,7 @@ func TestRedisGetMetricSpecForScaling(t *testing.T) { meta, closeFn, lengthFn, + logr.Logger{}, } metricSpec := mockRedisScaler.GetMetricSpecForScaling(context.Background()) diff --git a/pkg/scalers/redis_streams_scaler.go b/pkg/scalers/redis_streams_scaler.go index 6793d9876ad..b472393256d 100644 --- a/pkg/scalers/redis_streams_scaler.go +++ b/pkg/scalers/redis_streams_scaler.go @@ -5,10 +5,10 @@ import ( "fmt" "strconv" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -33,6 +33,7 @@ type redisStreamsScaler struct { metadata *redisStreamsMetadata closeFn func() error getPendingEntriesCountFn func(ctx context.Context) (int64, error) + logger logr.Logger } type redisStreamsMetadata struct { @@ -44,8 +45,6 @@ type redisStreamsMetadata struct { scalerIndex int } -var redisStreamsLog = logf.Log.WithName("redis_streams_scaler") - // NewRedisStreamsScaler creates a new redisStreamsScaler func NewRedisStreamsScaler(ctx context.Context, isClustered, isSentinel bool, config *ScalerConfig) (Scaler, error) { metricType, err := GetMetricTargetType(config) @@ -53,27 +52,29 @@ func NewRedisStreamsScaler(ctx context.Context, isClustered, isSentinel bool, co return nil, fmt.Errorf("error getting scaler metric type: %s", err) } + logger := InitializeLogger(config, "redis_streams_scaler") + if isClustered { meta, err := parseRedisStreamsMetadata(config, parseRedisClusterAddress) if err != nil { return nil, fmt.Errorf("error parsing redis streams metadata: %s", err) } - return createClusteredRedisStreamsScaler(ctx, meta, metricType) + return createClusteredRedisStreamsScaler(ctx, meta, metricType, logger) } else if isSentinel { meta, err := parseRedisStreamsMetadata(config, parseRedisSentinelAddress) if err != nil { return nil, fmt.Errorf("error parsing redis streams metadata: %s", err) } - return createSentinelRedisStreamsScaler(ctx, meta, metricType) + return createSentinelRedisStreamsScaler(ctx, meta, metricType, logger) } meta, err := parseRedisStreamsMetadata(config, parseRedisAddress) if err != nil { return nil, fmt.Errorf("error parsing redis streams metadata: %s", err) } - return createRedisStreamsScaler(ctx, meta, 
metricType) + return createRedisStreamsScaler(ctx, meta, metricType, logger) } -func createClusteredRedisStreamsScaler(ctx context.Context, meta *redisStreamsMetadata, metricType v2beta2.MetricTargetType) (Scaler, error) { +func createClusteredRedisStreamsScaler(ctx context.Context, meta *redisStreamsMetadata, metricType v2beta2.MetricTargetType, logger logr.Logger) (Scaler, error) { client, err := getRedisClusterClient(ctx, meta.connectionInfo) if err != nil { return nil, fmt.Errorf("connection to redis cluster failed: %s", err) @@ -81,7 +82,7 @@ func createClusteredRedisStreamsScaler(ctx context.Context, meta *redisStreamsMe closeFn := func() error { if err := client.Close(); err != nil { - redisStreamsLog.Error(err, "error closing redis client") + logger.Error(err, "error closing redis client") return err } return nil @@ -103,7 +104,7 @@ func createClusteredRedisStreamsScaler(ctx context.Context, meta *redisStreamsMe }, nil } -func createSentinelRedisStreamsScaler(ctx context.Context, meta *redisStreamsMetadata, metricType v2beta2.MetricTargetType) (Scaler, error) { +func createSentinelRedisStreamsScaler(ctx context.Context, meta *redisStreamsMetadata, metricType v2beta2.MetricTargetType, logger logr.Logger) (Scaler, error) { client, err := getRedisSentinelClient(ctx, meta.connectionInfo, meta.databaseIndex) if err != nil { return nil, fmt.Errorf("connection to redis sentinel failed: %s", err) @@ -111,7 +112,7 @@ func createSentinelRedisStreamsScaler(ctx context.Context, meta *redisStreamsMet closeFn := func() error { if err := client.Close(); err != nil { - redisStreamsLog.Error(err, "error closing redis client") + logger.Error(err, "error closing redis client") return err } return nil @@ -133,7 +134,7 @@ func createSentinelRedisStreamsScaler(ctx context.Context, meta *redisStreamsMet }, nil } -func createRedisStreamsScaler(ctx context.Context, meta *redisStreamsMetadata, metricType v2beta2.MetricTargetType) (Scaler, error) { +func createRedisStreamsScaler(ctx context.Context, meta *redisStreamsMetadata, metricType v2beta2.MetricTargetType, logger logr.Logger) (Scaler, error) { client, err := getRedisClient(ctx, meta.connectionInfo, meta.databaseIndex) if err != nil { return nil, fmt.Errorf("connection to redis failed: %s", err) @@ -141,7 +142,7 @@ func createRedisStreamsScaler(ctx context.Context, meta *redisStreamsMetadata, m closeFn := func() error { if err := client.Close(); err != nil { - redisStreamsLog.Error(err, "error closing redis client") + logger.Error(err, "error closing redis client") return err } return nil @@ -160,6 +161,7 @@ func createRedisStreamsScaler(ctx context.Context, meta *redisStreamsMetadata, m metadata: meta, closeFn: closeFn, getPendingEntriesCountFn: pendingEntriesCountFn, + logger: logger, }, nil } @@ -212,7 +214,7 @@ func (s *redisStreamsScaler) IsActive(ctx context.Context) (bool, error) { count, err := s.getPendingEntriesCountFn(ctx) if err != nil { - redisStreamsLog.Error(err, "error") + s.logger.Error(err, "error") return false, err } @@ -240,7 +242,7 @@ func (s *redisStreamsScaler) GetMetrics(ctx context.Context, metricName string, pendingEntriesCount, err := s.getPendingEntriesCountFn(ctx) if err != nil { - redisStreamsLog.Error(err, "error fetching pending entries count") + s.logger.Error(err, "error fetching pending entries count") return []external_metrics.ExternalMetricValue{}, err } diff --git a/pkg/scalers/redis_streams_scaler_test.go b/pkg/scalers/redis_streams_scaler_test.go index cfecf11a469..f6c0e8cf3c1 100644 --- 
a/pkg/scalers/redis_streams_scaler_test.go +++ b/pkg/scalers/redis_streams_scaler_test.go @@ -6,6 +6,7 @@ import ( "strconv" "testing" + "github.com/go-logr/logr" "github.com/stretchr/testify/assert" ) @@ -145,7 +146,7 @@ func TestRedisStreamsGetMetricSpecForScaling(t *testing.T) { } closeFn := func() error { return nil } getPendingEntriesCountFn := func(ctx context.Context) (int64, error) { return -1, nil } - mockRedisStreamsScaler := redisStreamsScaler{"", meta, closeFn, getPendingEntriesCountFn} + mockRedisStreamsScaler := redisStreamsScaler{"", meta, closeFn, getPendingEntriesCountFn, logr.Logger{}} metricSpec := mockRedisStreamsScaler.GetMetricSpecForScaling(context.Background()) metricName := metricSpec[0].External.Metric.Name diff --git a/pkg/scalers/scaler.go b/pkg/scalers/scaler.go index e5621eb88c9..22fe8eb83f4 100644 --- a/pkg/scalers/scaler.go +++ b/pkg/scalers/scaler.go @@ -22,12 +22,14 @@ import ( "strings" "time" + "github.com/go-logr/logr" metrics "github.com/rcrowley/go-metrics" "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" + logf "sigs.k8s.io/controller-runtime/pkg/log" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" ) @@ -64,15 +66,18 @@ type PushScaler interface { // ScalerConfig contains config fields common for all scalers type ScalerConfig struct { - // Name used for external scalers - Name string + // ScalableObjectName specifies name of the ScaledObject/ScaledJob that owns this scaler + ScalableObjectName string + + // ScalableObjectNamespace specifies namespace of the ScaledObject/ScaledJob that owns this scaler + ScalableObjectNamespace string + + // ScalableObjectType specifies whether this Scaler is owned by ScaledObject or ScaledJob + ScalableObjectType string // The timeout to be used on all HTTP requests from the controller GlobalHTTPTimeout time.Duration - // Namespace used for external scalers - Namespace string - // TriggerMetadata TriggerMetadata map[string]string @@ -127,6 +132,10 @@ func RemoveIndexFromMetricName(scalerIndex int, metricName string) (string, erro return metricNameWithoutIndex, nil } +func InitializeLogger(config *ScalerConfig, scalerName string) logr.Logger { + return logf.Log.WithName(scalerName).WithValues("type", config.ScalableObjectType, "namespace", config.ScalableObjectNamespace, "name", config.ScalableObjectName) +} + // GetMetricTargetType helps getting the metric target type of the scaler func GetMetricTargetType(config *ScalerConfig) (v2beta2.MetricTargetType, error) { switch config.MetricType { diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go index 44b758ebe77..918a1215e5b 100644 --- a/pkg/scalers/selenium_grid_scaler.go +++ b/pkg/scalers/selenium_grid_scaler.go @@ -12,10 +12,10 @@ import ( "strconv" "strings" + "github.com/go-logr/logr" v2beta2 "k8s.io/api/autoscaling/v2beta2" "k8s.io/apimachinery/pkg/labels" "k8s.io/metrics/pkg/apis/external_metrics" - logf "sigs.k8s.io/controller-runtime/pkg/log" kedautil "github.com/kedacore/keda/v2/pkg/util" ) @@ -24,6 +24,7 @@ type seleniumGridScaler struct { metricType v2beta2.MetricTargetType metadata *seleniumGridScalerMetadata client *http.Client + logger logr.Logger } type seleniumGridScalerMetadata struct { @@ -71,14 +72,14 @@ const ( DefaultBrowserVersion string = "latest" ) -var seleniumGridLog = logf.Log.WithName("selenium_grid_scaler") - func NewSeleniumGridScaler(config
diff --git a/pkg/scalers/selenium_grid_scaler.go b/pkg/scalers/selenium_grid_scaler.go
index 44b758ebe77..918a1215e5b 100644
--- a/pkg/scalers/selenium_grid_scaler.go
+++ b/pkg/scalers/selenium_grid_scaler.go
@@ -12,10 +12,10 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/go-logr/logr"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -24,6 +24,7 @@ type seleniumGridScaler struct {
 	metricType v2beta2.MetricTargetType
 	metadata   *seleniumGridScalerMetadata
 	client     *http.Client
+	logger     logr.Logger
 }
 
 type seleniumGridScalerMetadata struct {
@@ -71,14 +72,14 @@ const (
 	DefaultBrowserVersion string = "latest"
 )
 
-var seleniumGridLog = logf.Log.WithName("selenium_grid_scaler")
-
 func NewSeleniumGridScaler(config *ScalerConfig) (Scaler, error) {
 	metricType, err := GetMetricTargetType(config)
 	if err != nil {
 		return nil, fmt.Errorf("error getting scaler metric type: %s", err)
 	}
 
+	logger := InitializeLogger(config, "selenium_grid_scaler")
+
 	meta, err := parseSeleniumGridScalerMetadata(config)
 
 	if err != nil {
@@ -91,6 +92,7 @@ func NewSeleniumGridScaler(config *ScalerConfig) (Scaler, error) {
 		metricType: metricType,
 		metadata:   meta,
 		client:     httpClient,
+		logger:     logger,
 	}, nil
 }
 
@@ -150,7 +152,7 @@ func (s *seleniumGridScaler) Close(context.Context) error {
 }
 
 func (s *seleniumGridScaler) GetMetrics(ctx context.Context, metricName string, metricSelector labels.Selector) ([]external_metrics.ExternalMetricValue, error) {
-	sessions, err := s.getSessionsCount(ctx)
+	sessions, err := s.getSessionsCount(ctx, s.logger)
 	if err != nil {
 		return []external_metrics.ExternalMetricValue{}, fmt.Errorf("error requesting selenium grid endpoint: %s", err)
 	}
@@ -175,7 +177,7 @@ func (s *seleniumGridScaler) GetMetricSpecForScaling(context.Context) []v2beta2.
 }
 
 func (s *seleniumGridScaler) IsActive(ctx context.Context) (bool, error) {
-	v, err := s.getSessionsCount(ctx)
+	v, err := s.getSessionsCount(ctx, s.logger)
 	if err != nil {
 		return false, err
 	}
@@ -183,7 +185,7 @@ func (s *seleniumGridScaler) IsActive(ctx context.Context) (bool, error) {
 	return v > s.metadata.activationThreshold, nil
 }
 
-func (s *seleniumGridScaler) getSessionsCount(ctx context.Context) (int64, error) {
+func (s *seleniumGridScaler) getSessionsCount(ctx context.Context, logger logr.Logger) (int64, error) {
 	body, err := json.Marshal(map[string]string{
 		"query": "{ grid { maxSession, nodeCount }, sessionsInfo { sessionQueueRequests, sessions { id, capabilities, nodeId } } }",
 	})
@@ -212,14 +214,14 @@ func (s *seleniumGridScaler) getSessionsCount(ctx context.Context) (int64, error
 	if err != nil {
 		return -1, err
 	}
-	v, err := getCountFromSeleniumResponse(b, s.metadata.browserName, s.metadata.browserVersion, s.metadata.sessionBrowserName)
+	v, err := getCountFromSeleniumResponse(b, s.metadata.browserName, s.metadata.browserVersion, s.metadata.sessionBrowserName, logger)
 	if err != nil {
 		return -1, err
 	}
 	return v, nil
 }
 
-func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion string, sessionBrowserName string) (int64, error) {
+func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion string, sessionBrowserName string, logger logr.Logger) (int64, error) {
 	var count int64
 	var seleniumResponse = seleniumResponse{}
 
@@ -239,7 +241,7 @@ func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion s
 				}
 			}
 		} else {
-			seleniumGridLog.Error(err, fmt.Sprintf("Error when unmarshaling session queue requests: %s", err))
+			logger.Error(err, fmt.Sprintf("Error when unmarshaling session queue requests: %s", err))
 		}
 	}
 
@@ -255,7 +257,7 @@ func getCountFromSeleniumResponse(b []byte, browserName string, browserVersion s
 				}
 			}
 		} else {
-			seleniumGridLog.Error(err, fmt.Sprintf("Error when unmarshaling sessions info: %s", err))
+			logger.Error(err, fmt.Sprintf("Error when unmarshaling sessions info: %s", err))
 		}
 	}
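
Worth noting in the selenium hunks: `getSessionsCount` and `getCountFromSeleniumResponse` now receive the logger as an explicit parameter instead of consulting a package global, which keeps the parsing helper a plain function that tests can call directly. A sketch of the same idea, with a hypothetical `parseCount` helper and payload shape (both invented for illustration):

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-logr/logr"
)

// parseCount is a hypothetical helper in the style of
// getCountFromSeleniumResponse: it takes its logger as a parameter, so the
// caller decides where (and whether) parse failures are reported.
func parseCount(b []byte, logger logr.Logger) (int64, error) {
	var payload struct {
		Count int64 `json:"count"`
	}
	if err := json.Unmarshal(b, &payload); err != nil {
		logger.Error(err, "error when unmarshaling response")
		return -1, err
	}
	return payload.Count, nil
}

func main() {
	n, err := parseCount([]byte(`{"count": 3}`), logr.Discard())
	fmt.Println(n, err) // 3 <nil>
}
```
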
diff --git a/pkg/scalers/selenium_grid_scaler_test.go b/pkg/scalers/selenium_grid_scaler_test.go
index 869458a9d43..f103d81e9ae 100644
--- a/pkg/scalers/selenium_grid_scaler_test.go
+++ b/pkg/scalers/selenium_grid_scaler_test.go
@@ -3,6 +3,8 @@ package scalers
 import (
 	"reflect"
 	"testing"
+
+	"github.com/go-logr/logr"
 )
 
 func Test_getCountFromSeleniumResponse(t *testing.T) {
@@ -320,7 +322,7 @@ func Test_getCountFromSeleniumResponse(t *testing.T) {
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
-			got, err := getCountFromSeleniumResponse(tt.args.b, tt.args.browserName, tt.args.browserVersion, tt.args.sessionBrowserName)
+			got, err := getCountFromSeleniumResponse(tt.args.b, tt.args.browserName, tt.args.browserVersion, tt.args.sessionBrowserName, logr.Logger{})
 			if (err != nil) != tt.wantErr {
 				t.Errorf("getCountFromSeleniumResponse() error = %v, wantErr %v", err, tt.wantErr)
 				return
diff --git a/pkg/scalers/solace_scaler.go b/pkg/scalers/solace_scaler.go
index a0439696f16..3a7ef3d69dc 100644
--- a/pkg/scalers/solace_scaler.go
+++ b/pkg/scalers/solace_scaler.go
@@ -8,10 +8,10 @@ import (
 	"strconv"
 	"strings"
 
+	"github.com/go-logr/logr"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -61,6 +61,7 @@ type SolaceScaler struct {
 	metricType v2beta2.MetricTargetType
 	metadata   *SolaceMetadata
 	httpClient *http.Client
+	logger     logr.Logger
 }
 
 type SolaceMetadata struct {
@@ -111,9 +112,6 @@ type solaceSEMPMetadata struct {
 	ResponseCode int    `json:"responseCode"`
 }
 
-// Solace Logger
-var solaceLog = logf.Log.WithName(solaceScalerID + "_scaler")
-
 // Constructor for SolaceScaler
 func NewSolaceScaler(config *ScalerConfig) (Scaler, error) {
 	// Create HTTP Client
@@ -124,10 +122,12 @@ func NewSolaceScaler(config *ScalerConfig) (Scaler, error) {
 		return nil, fmt.Errorf("error getting scaler metric type: %s", err)
 	}
 
+	logger := InitializeLogger(config, solaceScalerID+"_scaler")
+
 	// Parse Solace Metadata
 	solaceMetadata, err := parseSolaceMetadata(config)
 	if err != nil {
-		solaceLog.Error(err, "Error parsing Solace Trigger Metadata or missing values")
+		logger.Error(err, "Error parsing Solace Trigger Metadata or missing values")
 		return nil, err
 	}
 
@@ -135,6 +135,7 @@ func NewSolaceScaler(config *ScalerConfig) (Scaler, error) {
 		metricType: metricType,
 		metadata:   solaceMetadata,
 		httpClient: httpClient,
+		logger:     logger,
 	}, nil
 }
 
@@ -351,7 +352,7 @@ func (s *SolaceScaler) GetMetrics(ctx context.Context, metricName string, metric
 	var metricValues, mv SolaceMetricValues
 	var mve error
 	if mv, mve = s.getSolaceQueueMetricsFromSEMP(ctx); mve != nil {
-		solaceLog.Error(mve, "call to semp endpoint failed")
+		s.logger.Error(mve, "call to semp endpoint failed")
 		return []external_metrics.ExternalMetricValue{}, mve
 	}
 	metricValues = mv
@@ -365,7 +366,7 @@ func (s *SolaceScaler) GetMetrics(ctx context.Context, metricName string, metric
 	default:
 		// Should never end up here
 		err := fmt.Errorf("unidentified metric: %s", metricName)
-		solaceLog.Error(err, "returning error to calling app")
+		s.logger.Error(err, "returning error to calling app")
 		return []external_metrics.ExternalMetricValue{}, err
 	}
 	return append([]external_metrics.ExternalMetricValue{}, metric), nil
@@ -377,7 +378,7 @@ func (s *SolaceScaler) GetMetrics(ctx context.Context, metricName string, metric
 func (s *SolaceScaler) IsActive(ctx context.Context) (bool, error) {
 	metricValues, err := s.getSolaceQueueMetricsFromSEMP(ctx)
 	if err != nil {
-		solaceLog.Error(err, "call to semp endpoint failed")
+		s.logger.Error(err, "call to semp endpoint failed")
 		return false, err
 	}
 	return (metricValues.msgCount > s.metadata.activationMsgCountTarget || metricValues.msgSpoolUsage > s.metadata.activationMsgSpoolUsageTarget), nil
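
The test hunks satisfy the widened signatures by passing the zero value `logr.Logger{}`; `logr.Discard()` is the explicit no-op equivalent. A table-driven sketch in the same style, exercising the hypothetical `parseCount` helper from the previous sketch:

```go
package main

import (
	"testing"

	"github.com/go-logr/logr"
)

// TestParseCount drives the hypothetical parseCount helper with a no-op
// logger, the same way the scaler tests above inject logr.Logger{}.
func TestParseCount(t *testing.T) {
	tests := []struct {
		name    string
		input   string
		want    int64
		wantErr bool
	}{
		{name: "valid payload", input: `{"count": 3}`, want: 3},
		{name: "malformed payload", input: `{"count":`, want: -1, wantErr: true},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := parseCount([]byte(tt.input), logr.Discard())
			if (err != nil) != tt.wantErr {
				t.Fatalf("parseCount() error = %v, wantErr %v", err, tt.wantErr)
			}
			if got != tt.want {
				t.Errorf("parseCount() = %v, want %v", got, tt.want)
			}
		})
	}
}
```
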
diff --git a/pkg/scalers/stan_scaler.go b/pkg/scalers/stan_scaler.go
index b951dd42a56..fe248d1c3e6 100644
--- a/pkg/scalers/stan_scaler.go
+++ b/pkg/scalers/stan_scaler.go
@@ -8,10 +8,10 @@ import (
 	"net/http"
 	"strconv"
 
+	"github.com/go-logr/logr"
 	v2beta2 "k8s.io/api/autoscaling/v2beta2"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/metrics/pkg/apis/external_metrics"
-	logf "sigs.k8s.io/controller-runtime/pkg/log"
 
 	kedautil "github.com/kedacore/keda/v2/pkg/util"
 )
@@ -41,6 +41,7 @@ type stanScaler struct {
 	metricType v2beta2.MetricTargetType
 	metadata   stanMetadata
 	httpClient *http.Client
+	logger     logr.Logger
 }
 
 type stanMetadata struct {
@@ -58,8 +59,6 @@ const (
 	defaultStanLagThreshold = 10
 )
 
-var stanLog = logf.Log.WithName("stan_scaler")
-
 // NewStanScaler creates a new stanScaler
 func NewStanScaler(config *ScalerConfig) (Scaler, error) {
 	metricType, err := GetMetricTargetType(config)
@@ -77,6 +76,7 @@ func NewStanScaler(config *ScalerConfig) (Scaler, error) {
 		metricType: metricType,
 		metadata:   stanMetadata,
 		httpClient: kedautil.CreateHTTPClient(config.GlobalHTTPTimeout, false),
+		logger:     InitializeLogger(config, "stan_scaler"),
 	}, nil
 }
 
@@ -136,7 +136,7 @@ func (s *stanScaler) IsActive(ctx context.Context) (bool, error) {
 	}
 	resp, err := s.httpClient.Do(req)
 	if err != nil {
-		stanLog.Error(err, "Unable to access the nats streaming broker monitoring endpoint", "natsServerMonitoringEndpoint", s.metadata.natsServerMonitoringEndpoint)
+		s.logger.Error(err, "Unable to access the nats streaming broker monitoring endpoint", "natsServerMonitoringEndpoint", s.metadata.natsServerMonitoringEndpoint)
 		return false, err
 	}
 
@@ -151,9 +151,9 @@ func (s *stanScaler) IsActive(ctx context.Context) (bool, error) {
 		}
 		defer baseResp.Body.Close()
 		if baseResp.StatusCode == 404 {
-			stanLog.Info("Streaming broker endpoint returned 404. Please ensure it has been created", "url", monitoringEndpoint, "channelName", s.metadata.subject)
+			s.logger.Info("Streaming broker endpoint returned 404. Please ensure it has been created", "url", monitoringEndpoint, "channelName", s.metadata.subject)
 		} else {
-			stanLog.Info("Unable to connect to STAN. Please ensure you have configured the ScaledObject with the correct endpoint.", "baseResp.StatusCode", baseResp.StatusCode, "natsServerMonitoringEndpoint", s.metadata.natsServerMonitoringEndpoint)
+			s.logger.Info("Unable to connect to STAN. Please ensure you have configured the ScaledObject with the correct endpoint.", "baseResp.StatusCode", baseResp.StatusCode, "natsServerMonitoringEndpoint", s.metadata.natsServerMonitoringEndpoint)
 		}
 
 		return false, err
@@ -161,7 +161,7 @@ func (s *stanScaler) IsActive(ctx context.Context) (bool, error) {
 	defer resp.Body.Close()
 	if err := json.NewDecoder(resp.Body).Decode(&s.channelInfo); err != nil {
-		stanLog.Error(err, "Unable to decode channel info as %v", err)
+		s.logger.Error(err, "Unable to decode channel info")
 		return false, err
 	}
 	return s.hasPendingMessage() || s.getMaxMsgLag() > s.metadata.activationLagThreshold, nil
@@ -205,7 +205,7 @@ func (s *stanScaler) hasPendingMessage() bool {
 	}
 
 	if !subscriberFound {
-		stanLog.Info("The STAN subscription was not found.", "combinedQueueName", combinedQueueName)
+		s.logger.Info("The STAN subscription was not found.", "combinedQueueName", combinedQueueName)
 	}
 
 	return false
@@ -234,17 +234,17 @@ func (s *stanScaler) GetMetrics(ctx context.Context, metricName string, metricSe
 	resp, err := s.httpClient.Do(req)
 
 	if err != nil {
-		stanLog.Error(err, "Unable to access the nats streaming broker monitoring endpoint", "natsServerMonitoringEndpoint", s.metadata.natsServerMonitoringEndpoint)
+		s.logger.Error(err, "Unable to access the nats streaming broker monitoring endpoint", "natsServerMonitoringEndpoint", s.metadata.natsServerMonitoringEndpoint)
 		return []external_metrics.ExternalMetricValue{}, err
 	}
 
 	defer resp.Body.Close()
 	if err := json.NewDecoder(resp.Body).Decode(&s.channelInfo); err != nil {
-		stanLog.Error(err, "Unable to decode channel info as %v", err)
+		s.logger.Error(err, "Unable to decode channel info")
 		return []external_metrics.ExternalMetricValue{}, err
 	}
 	totalLag := s.getMaxMsgLag()
-	stanLog.V(1).Info("Stan scaler: Providing metrics based on totalLag, threshold", "totalLag", totalLag, "lagThreshold", s.metadata.lagThreshold)
+	s.logger.V(1).Info("Stan scaler: Providing metrics based on totalLag, threshold", "totalLag", totalLag, "lagThreshold", s.metadata.lagThreshold)
 	metric := GenerateMetricInMili(metricName, float64(totalLag))
 	return append([]external_metrics.ExternalMetricValue{}, metric), nil
 }
diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go
index d505509d189..3035f0bdd00 100644
--- a/pkg/scaling/scale_handler.go
+++ b/pkg/scaling/scale_handler.go
@@ -306,14 +306,15 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp
 			}
 		}
 		config := &scalers.ScalerConfig{
-			Name:              withTriggers.Name,
-			Namespace:         withTriggers.Namespace,
-			TriggerMetadata:   trigger.Metadata,
-			ResolvedEnv:       resolvedEnv,
-			AuthParams:        make(map[string]string),
-			GlobalHTTPTimeout: h.globalHTTPTimeout,
-			ScalerIndex:       triggerIndex,
-			MetricType:        trigger.MetricType,
+			ScalableObjectName:      withTriggers.Name,
+			ScalableObjectNamespace: withTriggers.Namespace,
+			ScalableObjectType:      withTriggers.Kind,
+			TriggerMetadata:         trigger.Metadata,
+			ResolvedEnv:             resolvedEnv,
+			AuthParams:              make(map[string]string),
+			GlobalHTTPTimeout:       h.globalHTTPTimeout,
+			ScalerIndex:             triggerIndex,
+			MetricType:              trigger.MetricType,
 		}
 
 		config.AuthParams, config.PodIdentity, err = resolver.ResolveAuthRefAndPodIdentity(ctx, h.client, logger, trigger.AuthenticationRef, podTemplateSpec, withTriggers.Namespace)
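
Taken together with the `scale_handler.go` hunk, the wiring is: `buildScalers` fills the three `ScalableObject*` fields (the new `ScalableObjectType` comes from `withTriggers.Kind`), and each scaler constructor turns them into a tagged logger. A rough sketch of that flow against this patch's API, with placeholder object names:

```go
package main

import (
	"github.com/kedacore/keda/v2/pkg/scalers"
)

func main() {
	// Placeholder values; in KEDA these come from the ScaledObject/ScaledJob
	// being reconciled (withTriggers.Name / .Namespace / .Kind).
	config := &scalers.ScalerConfig{
		ScalableObjectName:      "my-app",
		ScalableObjectNamespace: "default",
		ScalableObjectType:      "ScaledObject",
	}

	// The same call each scaler constructor now makes; every subsequent
	// record from this logger carries type/namespace/name.
	logger := scalers.InitializeLogger(config, "example_scaler")
	logger.Info("building scaler")
}
```
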