From 20702d230db1356369017a8d7278e14a72c11709 Mon Sep 17 00:00:00 2001 From: kevin Date: Mon, 19 Sep 2022 13:59:51 +0800 Subject: [PATCH] Support Restrict Secret Access Support Restrict Secret Access, refer to https://github.com/kedacore/keda/issues/3668 Signed-off-by: kevin --- adapter/main.go | 24 ++++- controllers/keda/scaledjob_controller.go | 13 +-- controllers/keda/scaledobject_controller.go | 1 - main.go | 19 +++- .../resolver/azure_keyvault_handler.go | 10 +-- pkg/scaling/resolver/scale_resolvers.go | 89 ++++++++++--------- pkg/scaling/scale_handler.go | 9 +- pkg/util/env_resolver.go | 21 +++++ 8 files changed, 127 insertions(+), 59 deletions(-) diff --git a/adapter/main.go b/adapter/main.go index b4e0ca786d7..694e2111c1d 100644 --- a/adapter/main.go +++ b/adapter/main.go @@ -28,7 +28,10 @@ import ( "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" + kubeinformers "k8s.io/client-go/informers" + "k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" "k8s.io/klog/v2" "k8s.io/klog/v2/klogr" @@ -131,7 +134,19 @@ func (a *Adapter) makeProvider(ctx context.Context, globalHTTPTimeout time.Durat broadcaster := record.NewBroadcaster() recorder := broadcaster.NewRecorder(scheme, corev1.EventSource{Component: "keda-metrics-adapter"}) - handler := scaling.NewScaleHandler(mgr.GetClient(), nil, scheme, globalHTTPTimeout, recorder) + + kubeClientset, _ := kubernetes.NewForConfig(ctrl.GetConfigOrDie()) + objectNamespace, err := kedautil.GetClusterObjectNamespace() + if err != nil { + logger.Error(err, "Unable to get cluster object namespace") + return nil, nil, err + } + kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClientset, 1*time.Second, kubeinformers.WithNamespace(objectNamespace)) + secretInformer := kubeInformerFactory.Core().V1().Secrets() + + handler := scaling.NewScaleHandler(mgr.GetClient(), nil, scheme, globalHTTPTimeout, 
recorder, secretInformer.Lister()) + kubeInformerFactory.Start(ctx.Done()) + externalMetricsInfo := &[]provider.ExternalMetricInfo{} externalMetricsInfoLock := &sync.RWMutex{} @@ -146,13 +161,13 @@ func (a *Adapter) makeProvider(ctx context.Context, globalHTTPTimeout time.Durat } stopCh := make(chan struct{}) - if err := runScaledObjectController(ctx, mgr, handler, logger, externalMetricsInfo, externalMetricsInfoLock, maxConcurrentReconciles, stopCh); err != nil { + if err := runScaledObjectController(ctx, mgr, handler, logger, externalMetricsInfo, externalMetricsInfoLock, maxConcurrentReconciles, stopCh, secretInformer.Informer().HasSynced); err != nil { return nil, nil, err } return kedaprovider.NewProvider(ctx, logger, handler, mgr.GetClient(), *grpcClient, useMetricsServiceGrpc, namespace, externalMetricsInfo, externalMetricsInfoLock), stopCh, nil } -func runScaledObjectController(ctx context.Context, mgr manager.Manager, scaleHandler scaling.ScaleHandler, logger logr.Logger, externalMetricsInfo *[]provider.ExternalMetricInfo, externalMetricsInfoLock *sync.RWMutex, maxConcurrentReconciles int, stopCh chan<- struct{}) error { +func runScaledObjectController(ctx context.Context, mgr manager.Manager, scaleHandler scaling.ScaleHandler, logger logr.Logger, externalMetricsInfo *[]provider.ExternalMetricInfo, externalMetricsInfoLock *sync.RWMutex, maxConcurrentReconciles int, stopCh chan<- struct{}, secretSynced cache.InformerSynced) error { if err := (&kedacontrollers.MetricsScaledObjectReconciler{ Client: mgr.GetClient(), ScaleHandler: scaleHandler, @@ -170,6 +185,9 @@ func runScaledObjectController(ctx context.Context, mgr manager.Manager, scaleHa } }() + if ok := cache.WaitForCacheSync(ctx.Done(), secretSynced); !ok { + return fmt.Errorf("failed to wait for secrets cache to sync") + } return nil } diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go index 0a5e49b1ac0..38031ad3eef 100644 ---
a/controllers/keda/scaledjob_controller.go +++ b/controllers/keda/scaledjob_controller.go @@ -28,6 +28,7 @@ import ( "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" + corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/tools/cache" "k8s.io/client-go/tools/record" ctrl "sigs.k8s.io/controller-runtime" @@ -50,12 +51,13 @@ import ( // ScaledJobReconciler reconciles a ScaledJob object type ScaledJobReconciler struct { client.Client - Scheme *runtime.Scheme - GlobalHTTPTimeout time.Duration - Recorder record.EventRecorder - + Scheme *runtime.Scheme + GlobalHTTPTimeout time.Duration + Recorder record.EventRecorder scaledJobGenerations *sync.Map scaleHandler scaling.ScaleHandler + SecretsLister corev1listers.SecretLister + SecretsSynced cache.InformerSynced } type scaledJobMetricsData struct { @@ -75,9 +77,8 @@ func init() { // SetupWithManager initializes the ScaledJobReconciler instance and starts a new controller managed by the passed Manager instance. func (r *ScaledJobReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { - r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), nil, mgr.GetScheme(), r.GlobalHTTPTimeout, mgr.GetEventRecorderFor("scale-handler")) + r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), nil, mgr.GetScheme(), r.GlobalHTTPTimeout, mgr.GetEventRecorderFor("scale-handler"), r.SecretsLister) r.scaledJobGenerations = &sync.Map{} - return ctrl.NewControllerManagedBy(mgr). WithOptions(options). 
// Ignore updates to ScaledJob Status (in this case metadata.Generation does not change) diff --git a/controllers/keda/scaledobject_controller.go b/controllers/keda/scaledobject_controller.go index 24a4b4390f6..a81dba5ce9f 100644 --- a/controllers/keda/scaledobject_controller.go +++ b/controllers/keda/scaledobject_controller.go @@ -114,7 +114,6 @@ func (r *ScaledObjectReconciler) SetupWithManager(mgr ctrl.Manager, options cont if r.Recorder == nil { return fmt.Errorf("ScaledObjectReconciler.Recorder is not initialized") } - // Start controller return ctrl.NewControllerManagedBy(mgr). WithOptions(options). diff --git a/main.go b/main.go index 93d719885ad..b05f6beee15 100644 --- a/main.go +++ b/main.go @@ -17,6 +17,7 @@ limitations under the License. package main import ( + "context" "flag" "fmt" "os" @@ -26,6 +27,7 @@ import ( "github.com/spf13/pflag" apimachineryruntime "k8s.io/apimachinery/pkg/runtime" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/kubernetes" clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" ctrl "sigs.k8s.io/controller-runtime" @@ -40,6 +42,7 @@ import ( "github.com/kedacore/keda/v2/pkg/scaling" kedautil "github.com/kedacore/keda/v2/pkg/util" "github.com/kedacore/keda/v2/version" + kubeinformers "k8s.io/client-go/informers" //+kubebuilder:scaffold:imports ) @@ -158,13 +161,22 @@ func main() { globalHTTPTimeout := time.Duration(globalHTTPTimeoutMS) * time.Millisecond eventRecorder := mgr.GetEventRecorderFor("keda-operator") + kubeClientset, _ := kubernetes.NewForConfig(ctrl.GetConfigOrDie()) + objectNamespace, err := kedautil.GetClusterObjectNamespace() + if err != nil { + setupLog.Error(err, "Unable to get cluster object namespace") + os.Exit(1) + } + kubeInformerFactory := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClientset, 1*time.Second, kubeinformers.WithNamespace(objectNamespace)) + secretInformer := kubeInformerFactory.Core().V1().Secrets() + scaleClient, 
kubeVersion, err := k8s.InitScaleClient(mgr) if err != nil { setupLog.Error(err, "unable to init scale client") os.Exit(1) } - scaledHandler := scaling.NewScaleHandler(mgr.GetClient(), scaleClient, mgr.GetScheme(), globalHTTPTimeout, eventRecorder) + scaledHandler := scaling.NewScaleHandler(mgr.GetClient(), scaleClient, mgr.GetScheme(), globalHTTPTimeout, eventRecorder, secretInformer.Lister()) if err = (&kedacontrollers.ScaledObjectReconciler{ Client: mgr.GetClient(), @@ -181,6 +193,8 @@ func main() { Scheme: mgr.GetScheme(), GlobalHTTPTimeout: globalHTTPTimeout, Recorder: eventRecorder, + SecretsLister: secretInformer.Lister(), + SecretsSynced: secretInformer.Informer().HasSynced, }).SetupWithManager(mgr, controller.Options{MaxConcurrentReconciles: scaledJobMaxReconciles}); err != nil { setupLog.Error(err, "unable to create controller", "controller", "ScaledJob") os.Exit(1) @@ -223,6 +237,9 @@ func main() { setupLog.Info(fmt.Sprintf("Go OS/Arch: %s/%s", runtime.GOOS, runtime.GOARCH)) setupLog.Info(fmt.Sprintf("Running on Kubernetes %s", kubeVersion.PrettyVersion), "version", kubeVersion.Version) + ctx := context.Background() + kubeInformerFactory.Start(ctx.Done()) + if err := mgr.Start(ctrl.SetupSignalHandler()); err != nil { setupLog.Error(err, "problem running manager") os.Exit(1) diff --git a/pkg/scaling/resolver/azure_keyvault_handler.go b/pkg/scaling/resolver/azure_keyvault_handler.go index a76d387b572..3baf2e53db1 100644 --- a/pkg/scaling/resolver/azure_keyvault_handler.go +++ b/pkg/scaling/resolver/azure_keyvault_handler.go @@ -25,6 +25,7 @@ import ( az "github.com/Azure/go-autorest/autorest/azure" "github.com/Azure/go-autorest/autorest/azure/auth" "github.com/go-logr/logr" + corev1listers "k8s.io/client-go/listers/core/v1" "sigs.k8s.io/controller-runtime/pkg/client" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -42,13 +43,13 @@ func NewAzureKeyVaultHandler(v *kedav1alpha1.AzureKeyVault) *AzureKeyVaultHandle } } -func (vh 
*AzureKeyVaultHandler) Initialize(ctx context.Context, client client.Client, logger logr.Logger, triggerNamespace string) error { +func (vh *AzureKeyVaultHandler) Initialize(ctx context.Context, client client.Client, logger logr.Logger, triggerNamespace string, secretsLister corev1listers.SecretLister) error { keyvaultResourceURL, activeDirectoryEndpoint, err := vh.getPropertiesForCloud() if err != nil { return err } - authConfig, err := vh.getAuthConfig(ctx, client, logger, triggerNamespace, keyvaultResourceURL, activeDirectoryEndpoint) + authConfig, err := vh.getAuthConfig(ctx, client, logger, triggerNamespace, keyvaultResourceURL, activeDirectoryEndpoint, secretsLister) if err != nil { return err } @@ -101,12 +102,11 @@ func (vh *AzureKeyVaultHandler) getPropertiesForCloud() (string, string, error) } func (vh *AzureKeyVaultHandler) getAuthConfig(ctx context.Context, client client.Client, logger logr.Logger, - triggerNamespace, keyVaultResourceURL, activeDirectoryEndpoint string) (auth.AuthorizerConfig, error) { + triggerNamespace, keyVaultResourceURL, activeDirectoryEndpoint string, secretsLister corev1listers.SecretLister) (auth.AuthorizerConfig, error) { podIdentity := vh.vault.PodIdentity if podIdentity == nil { podIdentity = &kedav1alpha1.AuthPodIdentity{} } - switch podIdentity.Provider { case "", kedav1alpha1.PodIdentityProviderNone: clientID := vh.vault.Credentials.ClientID @@ -114,7 +114,7 @@ func (vh *AzureKeyVaultHandler) getAuthConfig(ctx context.Context, client client clientSecretName := vh.vault.Credentials.ClientSecret.ValueFrom.SecretKeyRef.Name clientSecretKey := vh.vault.Credentials.ClientSecret.ValueFrom.SecretKeyRef.Key - clientSecret := resolveAuthSecret(ctx, client, logger, clientSecretName, triggerNamespace, clientSecretKey) + clientSecret := resolveAuthSecret(ctx, client, logger, clientSecretName, triggerNamespace, clientSecretKey, secretsLister) if clientID == "" || tenantID == "" || clientSecret == "" { return nil, fmt.Errorf("clientID, 
tenantID and clientSecret are expected when not using a pod identity provider") diff --git a/pkg/scaling/resolver/scale_resolvers.go b/pkg/scaling/resolver/scale_resolvers.go index 41fe4bef4f5..4850f0660a9 100644 --- a/pkg/scaling/resolver/scale_resolvers.go +++ b/pkg/scaling/resolver/scale_resolvers.go @@ -21,17 +21,20 @@ import ( "context" "fmt" "os" + "strings" "github.com/go-logr/logr" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/types" + corev1listers "k8s.io/client-go/listers/core/v1" "knative.dev/pkg/apis/duck" duckv1 "knative.dev/pkg/apis/duck/v1" "sigs.k8s.io/controller-runtime/pkg/client" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" + "github.com/kedacore/keda/v2/pkg/util" ) const ( @@ -40,6 +43,18 @@ const ( referenceCloser = ')' ) +var kedaNamespace, _ = util.GetClusterObjectNamespace() + +// getRestrictSecretAccess returns true when the RESTRICT_SECRET_ACCESS env var is set to "true", meaning secrets may only be read from the KEDA install namespace +func getRestrictSecretAccess() bool { + const RestrictSecretAccessEnvVar = "RESTRICT_SECRET_ACCESS" + restrictSecretAccess, found := os.LookupEnv(RestrictSecretAccessEnvVar) + if !found { + return false + } + return strings.ToLower(restrictSecretAccess) == "true" +} + // ResolveScaleTargetPodSpec for given scalableObject inspects the scale target workload, // which could be almost any k8s resource (Deployment, StatefulSet, CustomResource...) // and for the given resource returns *corev1.PodTemplateSpec and a name of the container @@ -102,7 +117,7 @@ func ResolveScaleTargetPodSpec(ctx context.Context, kubeClient client.Client, lo // ResolveContainerEnv resolves all environment variables in a container. // It returns either map of env variable key and value or error if there is any.
-func ResolveContainerEnv(ctx context.Context, client client.Client, logger logr.Logger, podSpec *corev1.PodSpec, containerName, namespace string) (map[string]string, error) { +func ResolveContainerEnv(ctx context.Context, client client.Client, logger logr.Logger, podSpec *corev1.PodSpec, containerName, namespace string, secretsLister corev1listers.SecretLister) (map[string]string, error) { if len(podSpec.Containers) < 1 { return nil, fmt.Errorf("target object doesn't have containers") } @@ -125,15 +140,15 @@ func ResolveContainerEnv(ctx context.Context, client client.Client, logger logr. container = podSpec.Containers[0] } - return resolveEnv(ctx, client, logger, &container, namespace) + return resolveEnv(ctx, client, logger, &container, namespace, secretsLister) } // ResolveAuthRefAndPodIdentity provides authentication parameters and pod identity needed authenticate scaler with the environment. func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, logger logr.Logger, triggerAuthRef *kedav1alpha1.ScaledObjectAuthRef, podTemplateSpec *corev1.PodTemplateSpec, - namespace string) (map[string]string, kedav1alpha1.AuthPodIdentity, error) { + namespace string, secretsLister corev1listers.SecretLister) (map[string]string, kedav1alpha1.AuthPodIdentity, error) { if podTemplateSpec != nil { - authParams, podIdentity := resolveAuthRef(ctx, client, logger, triggerAuthRef, &podTemplateSpec.Spec, namespace) + authParams, podIdentity := resolveAuthRef(ctx, client, logger, triggerAuthRef, &podTemplateSpec.Spec, namespace, secretsLister) if podIdentity.Provider == kedav1alpha1.PodIdentityProviderAwsEKS { serviceAccountName := podTemplateSpec.Spec.ServiceAccountName @@ -150,7 +165,7 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log return authParams, podIdentity, nil } - authParams, _ := resolveAuthRef(ctx, client, logger, triggerAuthRef, nil, namespace) + authParams, _ := resolveAuthRef(ctx, client, logger, triggerAuthRef, 
nil, namespace, secretsLister) return authParams, kedav1alpha1.AuthPodIdentity{Provider: kedav1alpha1.PodIdentityProviderNone}, nil } @@ -158,7 +173,7 @@ func ResolveAuthRefAndPodIdentity(ctx context.Context, client client.Client, log // based on authentication method defined in TriggerAuthentication, authParams and podIdentity is returned func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logger, triggerAuthRef *kedav1alpha1.ScaledObjectAuthRef, podSpec *corev1.PodSpec, - namespace string) (map[string]string, kedav1alpha1.AuthPodIdentity) { + namespace string, secretsLister corev1listers.SecretLister) (map[string]string, kedav1alpha1.AuthPodIdentity) { result := make(map[string]string) var podIdentity kedav1alpha1.AuthPodIdentity @@ -176,7 +191,7 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge result[e.Parameter] = "" continue } - env, err := ResolveContainerEnv(ctx, client, logger, podSpec, e.ContainerName, namespace) + env, err := ResolveContainerEnv(ctx, client, logger, podSpec, e.ContainerName, namespace, secretsLister) if err != nil { result[e.Parameter] = "" } else { @@ -186,7 +201,7 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge } if triggerAuthSpec.SecretTargetRef != nil { for _, e := range triggerAuthSpec.SecretTargetRef { - result[e.Parameter] = resolveAuthSecret(ctx, client, logger, e.Name, triggerNamespace, e.Key) + result[e.Parameter] = resolveAuthSecret(ctx, client, logger, e.Name, triggerNamespace, e.Key, secretsLister) } } if triggerAuthSpec.HashiCorpVault != nil && len(triggerAuthSpec.HashiCorpVault.Secrets) > 0 { @@ -216,7 +231,7 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge } if triggerAuthSpec.AzureKeyVault != nil && len(triggerAuthSpec.AzureKeyVault.Secrets) > 0 { vaultHandler := NewAzureKeyVaultHandler(triggerAuthSpec.AzureKeyVault) - err := vaultHandler.Initialize(ctx, client, logger, triggerNamespace) + err 
:= vaultHandler.Initialize(ctx, client, logger, triggerNamespace, secretsLister) if err != nil { logger.Error(err, "Error authenticating to Azure Key Vault", "triggerAuthRef.Name", triggerAuthRef.Name) } else { @@ -237,27 +252,6 @@ func resolveAuthRef(ctx context.Context, client client.Client, logger logr.Logge return result, podIdentity } -var clusterObjectNamespaceCache *string - -func getClusterObjectNamespace() (string, error) { - // Check if a cached value is available. - if clusterObjectNamespaceCache != nil { - return *clusterObjectNamespaceCache, nil - } - env := os.Getenv("KEDA_CLUSTER_OBJECT_NAMESPACE") - if env != "" { - clusterObjectNamespaceCache = &env - return env, nil - } - data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") - if err != nil { - return "", err - } - strData := string(data) - clusterObjectNamespaceCache = &strData - return strData, nil -} - func getTriggerAuthSpec(ctx context.Context, client client.Client, triggerAuthRef *kedav1alpha1.ScaledObjectAuthRef, namespace string) (*kedav1alpha1.TriggerAuthenticationSpec, string, error) { if triggerAuthRef.Kind == "" || triggerAuthRef.Kind == "TriggerAuthentication" { triggerAuth := &kedav1alpha1.TriggerAuthentication{} @@ -267,7 +261,7 @@ func getTriggerAuthSpec(ctx context.Context, client client.Client, triggerAuthRe } return &triggerAuth.Spec, namespace, nil } else if triggerAuthRef.Kind == "ClusterTriggerAuthentication" { - clusterNamespace, err := getClusterObjectNamespace() + clusterNamespace, err := util.GetClusterObjectNamespace() if err != nil { return nil, "", err } @@ -281,7 +275,7 @@ func getTriggerAuthSpec(ctx context.Context, client client.Client, triggerAuthRe return nil, "", fmt.Errorf("unknown trigger auth kind %s", triggerAuthRef.Kind) } -func resolveEnv(ctx context.Context, client client.Client, logger logr.Logger, container *corev1.Container, namespace string) (map[string]string, error) { +func resolveEnv(ctx context.Context, client 
client.Client, logger logr.Logger, container *corev1.Container, namespace string, secretsLister corev1listers.SecretLister) (map[string]string, error) { resolved := make(map[string]string) if container.EnvFrom != nil { @@ -300,7 +294,7 @@ func resolveEnv(ctx context.Context, client client.Client, logger logr.Logger, c return nil, fmt.Errorf("error reading config ref %s on namespace %s/: %s", source.ConfigMapRef, namespace, err) } } else if source.SecretRef != nil { - secretsMap, err := resolveSecretMap(ctx, client, source.SecretRef, namespace) + secretsMap, err := resolveSecretMap(ctx, client, source.SecretRef, namespace, secretsLister) switch { case err == nil: for k, v := range secretsMap { @@ -330,7 +324,7 @@ func resolveEnv(ctx context.Context, client client.Client, logger logr.Logger, c switch { case envVar.ValueFrom.SecretKeyRef != nil: // env is a secret selector - value, err = resolveSecretValue(ctx, client, envVar.ValueFrom.SecretKeyRef, envVar.ValueFrom.SecretKeyRef.Key, namespace) + value, err = resolveSecretValue(ctx, client, envVar.ValueFrom.SecretKeyRef, envVar.ValueFrom.SecretKeyRef.Key, namespace, secretsLister) if err != nil { if envVar.ValueFrom.SecretKeyRef.Optional != nil && *envVar.ValueFrom.SecretKeyRef.Optional { continue @@ -420,9 +414,14 @@ func resolveConfigMap(ctx context.Context, client client.Client, configMapRef *c return configMap.Data, nil } -func resolveSecretMap(ctx context.Context, client client.Client, secretMapRef *corev1.SecretEnvSource, namespace string) (map[string]string, error) { +func resolveSecretMap(ctx context.Context, client client.Client, secretMapRef *corev1.SecretEnvSource, namespace string, secretsLister corev1listers.SecretLister) (map[string]string, error) { secret := &corev1.Secret{} - err := client.Get(ctx, types.NamespacedName{Name: secretMapRef.Name, Namespace: namespace}, secret) + var err error + if getRestrictSecretAccess() { + secret, err = secretsLister.Secrets(kedaNamespace).Get(secretMapRef.Name) + } 
else { + err = client.Get(ctx, types.NamespacedName{Name: secretMapRef.Name, Namespace: namespace}, secret) + } if err != nil { return nil, err } @@ -434,9 +433,14 @@ func resolveSecretMap(ctx context.Context, client client.Client, secretMapRef *c return secretsStr, nil } -func resolveSecretValue(ctx context.Context, client client.Client, secretKeyRef *corev1.SecretKeySelector, keyName, namespace string) (string, error) { +func resolveSecretValue(ctx context.Context, client client.Client, secretKeyRef *corev1.SecretKeySelector, keyName, namespace string, secretsLister corev1listers.SecretLister) (string, error) { secret := &corev1.Secret{} - err := client.Get(ctx, types.NamespacedName{Name: secretKeyRef.Name, Namespace: namespace}, secret) + var err error + if getRestrictSecretAccess() { + secret, err = secretsLister.Secrets(kedaNamespace).Get(secretKeyRef.Name) + } else { + err = client.Get(ctx, types.NamespacedName{Name: secretKeyRef.Name, Namespace: namespace}, secret) + } if err != nil { return "", err } @@ -452,14 +456,19 @@ func resolveConfigValue(ctx context.Context, client client.Client, configKeyRef return configMap.Data[keyName], nil } -func resolveAuthSecret(ctx context.Context, client client.Client, logger logr.Logger, name, namespace, key string) string { +func resolveAuthSecret(ctx context.Context, client client.Client, logger logr.Logger, name, namespace, key string, secretsLister corev1listers.SecretLister) string { if name == "" || namespace == "" || key == "" { logger.Error(fmt.Errorf("error trying to get secret"), "name, namespace and key are required", "Secret.Namespace", namespace, "Secret.Name", name, "key", key) return "" } secret := &corev1.Secret{} - err := client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, secret) + var err error + if getRestrictSecretAccess() { + secret, err = secretsLister.Secrets(kedaNamespace).Get(name) + } else { + err = client.Get(ctx, types.NamespacedName{Name: name, Namespace: namespace}, 
secret) + } if err != nil { logger.Error(err, "Error trying to get secret from namespace", "Secret.Namespace", namespace, "Secret.Name", name) return "" diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index cb057644346..889a131c016 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -27,6 +27,7 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + corev1listers "k8s.io/client-go/listers/core/v1" "k8s.io/client-go/scale" "k8s.io/client-go/tools/record" "k8s.io/metrics/pkg/apis/external_metrics" @@ -64,10 +65,11 @@ type scaleHandler struct { recorder record.EventRecorder scalerCaches map[string]*cache.ScalersCache lock *sync.RWMutex + secretsLister corev1listers.SecretLister } // NewScaleHandler creates a ScaleHandler object -func NewScaleHandler(client client.Client, scaleClient scale.ScalesGetter, reconcilerScheme *runtime.Scheme, globalHTTPTimeout time.Duration, recorder record.EventRecorder) ScaleHandler { +func NewScaleHandler(client client.Client, scaleClient scale.ScalesGetter, reconcilerScheme *runtime.Scheme, globalHTTPTimeout time.Duration, recorder record.EventRecorder, secretsLister corev1listers.SecretLister) ScaleHandler { return &scaleHandler{ client: client, logger: logf.Log.WithName("scalehandler"), @@ -77,6 +79,7 @@ func NewScaleHandler(client client.Client, scaleClient scale.ScalesGetter, recon recorder: recorder, scalerCaches: map[string]*cache.ScalersCache{}, lock: &sync.RWMutex{}, + secretsLister: secretsLister, } } @@ -466,7 +469,7 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp factory := func() (scalers.Scaler, *scalers.ScalerConfig, error) { if podTemplateSpec != nil { - resolvedEnv, err = resolver.ResolveContainerEnv(ctx, h.client, logger, &podTemplateSpec.Spec, containerName, withTriggers.Namespace) + resolvedEnv, err = resolver.ResolveContainerEnv(ctx, h.client, logger, &podTemplateSpec.Spec, 
containerName, withTriggers.Namespace, h.secretsLister) if err != nil { return nil, nil, fmt.Errorf("error resolving secrets for ScaleTarget: %s", err) } @@ -484,7 +487,7 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp MetricType: trigger.MetricType, } - config.AuthParams, config.PodIdentity, err = resolver.ResolveAuthRefAndPodIdentity(ctx, h.client, logger, trigger.AuthenticationRef, podTemplateSpec, withTriggers.Namespace) + config.AuthParams, config.PodIdentity, err = resolver.ResolveAuthRefAndPodIdentity(ctx, h.client, logger, trigger.AuthenticationRef, podTemplateSpec, withTriggers.Namespace, h.secretsLister) if err != nil { return nil, nil, err } diff --git a/pkg/util/env_resolver.go b/pkg/util/env_resolver.go index 21e6647ac2e..40835f888c1 100644 --- a/pkg/util/env_resolver.go +++ b/pkg/util/env_resolver.go @@ -22,6 +22,8 @@ import ( "time" ) +var clusterObjectNamespaceCache *string + func ResolveOsEnvBool(envName string, defaultValue bool) (bool, error) { valueStr, found := os.LookupEnv(envName) @@ -52,3 +54,22 @@ func ResolveOsEnvDuration(envName string) (*time.Duration, error) { return nil, nil } + +func GetClusterObjectNamespace() (string, error) { + // Check if a cached value is available. + if clusterObjectNamespaceCache != nil { + return *clusterObjectNamespaceCache, nil + } + env := os.Getenv("KEDA_CLUSTER_OBJECT_NAMESPACE") + if env != "" { + clusterObjectNamespaceCache = &env + return env, nil + } + data, err := os.ReadFile("/var/run/secrets/kubernetes.io/serviceaccount/namespace") + if err != nil { + return "", err + } + strData := string(data) + clusterObjectNamespaceCache = &strData + return strData, nil +}