
The provider continues to reveal sensitive variables during destroy or update in-place #1287

@cdtzabra

Description


Terraform, Provider, Kubernetes and Helm Versions

Terraform version: v1.6.0
Provider version: 2.11.0
Kubernetes version: 1.26.4

Affected Resource(s)

  • helm_release

Terraform Configuration Files

resource "helm_release" "vsphere-csi" {
  name             = "csi"

  # https://github.com/vsphere-tmm/helm-charts/tree/master/charts/vsphere-csi
  # https://artifacthub.io/packages/helm/vsphere-tmm/vsphere-csi
  repository       = "https://vsphere-tmm.github.io/helm-charts"
  chart            = "vsphere-csi"
  version          = "3.2.3"
  namespace        = "vsphere-csi"
  create_namespace = true
  values = [
    templatefile("${path.module}/files/vsphere-csi-values.yaml.tpl",
      {
        vcenter_server      = var.vsphere_server
        vcenter_user        = var.vsphere_user   # sensitive variable
        vcenter_password    = var.vsphere_password  # sensitive variable
        vcenter_datacenters = yamlencode(var.vsphere_csi_datacenters)
      })
  ]
}
variable "vsphere_server" {
  type        = string
  description = "The PCC vsphere ID"
}

variable "vsphere_user" {
  type        = string
  description = "PCC user for authentication"
  sensitive   = true
}

variable "vsphere_password" {
  type        = string
  description = "PCC user password"
  sensitive   = true
}

variable "vsphere_csi_datacenters" {
  type        = list(string)
  description = "PCC datacenters to use for CSI"
}

files/vsphere-csi-values.yaml.tpl looks like:

global:
  config:
    storageClass: thin-csi
    storageclass:
      enabled: false
      expansion: true
      default: false
    vcenter:
      ${vcenter_server}:
          server: ${vcenter_server}
          user: ${vcenter_user}
          password: ${vcenter_password}
          datacenters:
            ${indent(12, vcenter_datacenters)}

controller:
  nodeSelector:
    node-role.kubernetes.io/control-plane: "true"
  tolerations: {}
  config:
    csi-migration: true
    csi-auth-check: true
    online-volume-extend: true
    trigger-csi-fullsync: false
    async-query-volume: true
    block-volume-snapshot: false # changed # to recheck
    csi-windows-support: false # changed
    use-csinode-id: true
    list-volumes: true
    pv-to-backingdiskobjectid-mapping: false
    cnsmgr-suspend-create-volume: true
    topology-preferential-datastores: false
    multi-vcenter-csi-topology: false # changed
    max-pvscsi-targets-per-vm: true
    csi-internal-generated-cluster-id: true
    listview-tasks: false
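
A possible mitigation while this is open (not verified against this exact leak) is to keep the credentials out of the templated values document and pass them through set_sensitive blocks, which the provider is designed to mask. A minimal sketch, assuming the chart accepts the same keys shown in the template above (global.config.vcenter.<server>.user / .password); dots inside the server name need escaping in the set path, and the user/password lines would be dropped from the .tpl file:

resource "helm_release" "vsphere-csi" {
  name             = "csi"
  repository       = "https://vsphere-tmm.github.io/helm-charts"
  chart            = "vsphere-csi"
  version          = "3.2.3"
  namespace        = "vsphere-csi"
  create_namespace = true

  # Only non-secret settings are rendered through the template.
  values = [
    templatefile("${path.module}/files/vsphere-csi-values.yaml.tpl", {
      vcenter_server      = var.vsphere_server
      vcenter_datacenters = yamlencode(var.vsphere_csi_datacenters)
    })
  ]

  # Credentials never enter the rendered values document; set_sensitive
  # values are masked by the provider. Dots in the server name are escaped
  # so Helm treats it as a single map key.
  set_sensitive {
    name  = "global.config.vcenter.${replace(var.vsphere_server, ".", "\\.")}.user"
    value = var.vsphere_user
  }

  set_sensitive {
    name  = "global.config.vcenter.${replace(var.vsphere_server, ".", "\\.")}.password"
    value = var.vsphere_password
  }
}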

Debug Output


Panic Output

Output of terraform destroy or terraform plan with some changes (update in place):

 ~ resource "helm_release" "vsphere-csi" {
        id                         = "csi"
      ~ metadata                   = [
          - {
              - app_version = "3.0.2"
              - chart       = "vsphere-csi"
              - name        = "csi"
              - namespace   = "infra-vmware-system-csi"
              - revision    = 2
              - values      = jsonencode(
                    {
                      - controller = {
                          - config       = {
                              - async-query-volume                = true
                              - block-volume-snapshot             = false
                              - cnsmgr-suspend-create-volume      = true
                              - csi-auth-check                    = true
                              - csi-internal-generated-cluster-id = true
                              - csi-migration                     = true
                              - csi-windows-support               = false
                              - list-volumes                      = true
                              - listview-tasks                    = false
                              - max-pvscsi-targets-per-vm         = true
                              - multi-vcenter-csi-topology        = false
                              - online-volume-extend              = true
                              - pv-to-backingdiskobjectid-mapping = false
                              - topology-preferential-datastores  = false
                              - trigger-csi-fullsync              = false
                              - use-csinode-id                    = true
                            }
                          - nodeSelector = {
                              - "node-role.kubernetes.io/control-plane" = "true"
                            }
                          - tolerations  = {}
                        }
                      - global     = {
                          - config = {
                              - storageClass = "thin-csi"
                              - storageclass = {
                                  - default   = false
                                  - enabled   = false
                                  - expansion = true
                                }
                              - vcenter      = {
                                  - "xxxxxx" = {
                                      - datacenters = [
                                          - "xxxxxxxx",
                                        ]
                                      - password    = SENSITIVE_VALUE_REVEALED
                                      - server      = "xxxxx"
                                      - user        = SENSITIVE_VALUE_REVEALED
                                    }
                                }
                            }
                        }
                    }
                )
              - version     = "3.2.3"
            },
        ] -> (known after apply)
        name                       = "csi"
      ~ values                     = [
          - (sensitive value),
          + (sensitive value),
        ]
        # (26 unchanged attributes hidden)
    }

Plan: 0 to add, 1 to change, 0 to destroy.

Expected Behavior

Sensitive variables should not be revealed in the metadata field.

This issue was already reported in #793, with fix explanations from the Terraform team.
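
For reference, if the provider marked metadata as sensitive in its schema (the kind of fix discussed in #793), the plan would presumably collapse the whole block instead of decoding the values. An illustrative sketch only, not actual provider output:

 ~ resource "helm_release" "vsphere-csi" {
        id       = "csi"
      ~ metadata = (sensitive value)
        name     = "csi"
      ~ values   = [
          - (sensitive value),
          + (sensitive value),
        ]
        # (26 unchanged attributes hidden)
    }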

Actual Behavior

Sensitive data is displayed in the metadata field.
